// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}

/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 skip = req->assoclen + req->cryptlen;

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}

/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: Device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 *
 * Return:
 * Number of entries in the scatterlist
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	*lbytes = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}

	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dev: Device object
 * @dest: Buffer to copy to/from
 * @sg: SG list
 * @to_skip: Number of bytes to skip before copying
 * @end: Offset of last byte to copy
 * @direct: Transfer direction (true == from SG list to buffer, false == from
 *          buffer to SG list)
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}

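/**
 * cc_render_buff_to_mlli() - Add a contiguous DMA buffer to an MLLI table
 *
 * @dev: Device object
 * @buff_dma: DMA address of the buffer
 * @buff_size: Size of the buffer in bytes
 * @curr_nents: [IN/OUT] Running count of MLLI entries written so far
 * @mlli_entry_pp: [IN/OUT] Pointer to the next free MLLI entry
 *
 * Buffers larger than CC_MAX_MLLI_ENTRY_SIZE are split over several entries.
 *
 * Return: 0 on success, -ENOMEM if the table would exceed
 * MAX_NUM_OF_TOTAL_MLLI_ENTRIES.
 */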
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow */
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/* handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/* Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}

static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = mlli_params->mlli_virt_addr;
	/* go over all SG's and link them into one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset,
					  &total_nents, &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/* Calculate the current MLLI table length for the
			 * length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}

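/**
 * cc_add_sg_entry() - Add an SG list to the buffer-array descriptor
 *
 * @dev: Device object
 * @sgl_data: Buffer array being built
 * @nents: Number of entries in @sgl
 * @sgl: SG list to add
 * @data_len: Number of data bytes in @sgl
 * @data_offset: Offset of the data within @sgl
 * @is_last_table: True if this is the last table of the request
 * @mlli_nents: [OUT] Optional MLLI entry counter, reset here and updated
 *              later by cc_generate_mlli()
 */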
static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	int ret = 0;

	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
	if (*nents > max_sg_nents) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			*nents, max_sg_nents);
		*nents = 0;
		return -ENOMEM;
	}

	ret = dma_map_sg(dev, sg, *nents, direction);
	if (dma_mapping_error(dev, ret)) {
		*nents = 0;
		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
		return -ENOMEM;
	}

	*mapped_nents = ret;

	return 0;
}

static int
cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
		     u8 *config_data, struct buffer_array *sg_data,
		     unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}

static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}

void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}

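/**
 * cc_map_cipher_request() - DMA-map the buffers of a cipher request
 *
 * @drvdata: Driver private data
 * @ctx: Cipher request context (struct cipher_req_ctx)
 * @ivsize: Size of the IV in bytes (may be 0)
 * @nbytes: Number of data bytes to process
 * @info: IV buffer
 * @src: Source SG list
 * @dst: Destination SG list (may equal @src for in-place operation)
 * @flags: Allocation flags for the MLLI table
 *
 * Maps the IV and the src/dst SG lists and, when more than one entry is
 * mapped, builds the MLLI table. Undone by cc_unmap_cipher_request().
 *
 * Return: 0 on success, negative error code otherwise.
 */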
int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}

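/**
 * cc_unmap_aead_request() - Release all DMA mappings of an AEAD request
 *
 * @dev: Device object
 * @req: AEAD request
 *
 * Unmaps the MAC buffer, the GCM/CCM helper blocks, the IV, the MLLI table
 * and the src/dst SG lists mapped by cc_map_aead_request(). On coherent
 * platforms the backed-up MAC is copied back for in-place decryption.
 */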
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kzfree(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
		     DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy the MAC back from its temporary location, to deal
		 * with a possible data memory overwrite caused by cache
		 * coherence problems.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}

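/**
 * cc_is_icv_frag() - Check whether the ICV spans more than one SG entry
 *
 * @sgl_nents: Number of entries in the SG list
 * @authsize: Authentication tag (ICV) size in bytes
 * @last_entry_data_size: Number of data bytes in the last SG entry
 *
 * Return: true if the ICV is fragmented across SG entries.
 */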
static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}

static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kzfree(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);

chain_iv_exit:
	return rc;
}

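/**
 * cc_aead_chain_assoc() - Prepare the associated-data chain of an AEAD request
 *
 * @drvdata: Driver private data
 * @req: AEAD request
 * @sg_data: Buffer array to add the assoc data to
 * @is_last: True if this is the last table of the request
 * @do_chain: Force chaining the assoc data into the MLLI table
 *
 * Decides between NULL/DLLI/MLLI representation for the associated data and,
 * for the MLLI case, adds it to @sg_data.
 *
 * Return: 0 on success, negative error code otherwise.
 */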
static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	int mapped_nents = 0;
	struct device *dev = drvdata_to_dev(drvdata);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}

static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	ssize_t offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}

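/**
 * cc_prepare_aead_data_mlli() - Add the AEAD data to the MLLI table and
 * locate the ICV
 *
 * @drvdata: Driver private data
 * @req: AEAD request
 * @sg_data: Buffer array to add the data SG lists to
 * @src_last_bytes: Number of data bytes in the last src SG entry
 * @dst_last_bytes: Number of data bytes in the last dst SG entry
 * @is_last_table: True if this is the last table of the request
 *
 * Handles the in-place, non-in-place decrypt and non-in-place encrypt cases
 * and records whether the ICV is fragmented and where it resides.
 */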
static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/* INPLACE */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* On coherent platforms (e.g. ACP) the ICV
				 * was already copied for any INPLACE-DECRYPT
				 * operation, hence skip the copy here.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case of a non-contiguous sg */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/* NON-INPLACE and DECRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/* Should handle the case of a non-contiguous sg */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/* NON-INPLACE and ENCRYPT */
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}

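/**
 * cc_aead_chain_data() - Prepare the data (cryptlen) chain of an AEAD request
 *
 * @drvdata: Driver private data
 * @req: AEAD request
 * @sg_data: Buffer array to add the data SG lists to
 * @is_last_table: True if this is the last table of the request
 * @do_chain: Force chaining the data into the MLLI table
 *
 * Skips past the associated data in the src/dst SG lists, maps the
 * destination when it differs from the source, and selects DLLI or MLLI
 * representation for the payload.
 *
 * Return: 0 on success, negative error code otherwise.
 */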
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = req->assoclen + req->cryptlen;
	u32 sg_index = 0;
	u32 size_to_skip = req->assoclen;
	struct scatterlist *sgl;

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	/* check where the data starts */
	while (src_mapped_nents && (sg_index <= size_to_skip)) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = req->assoclen + req->cryptlen;

		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
		else
			size_for_map -= authsize;

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.mapped_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	/* check where the data starts */
	while (dst_mapped_nents && sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}

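/**
 * cc_update_aead_mlli_nents() - Assign SRAM addresses for the MLLI tables
 *
 * @drvdata: Driver private data
 * @req: AEAD request
 *
 * Lays out the assoc/src/dst MLLI tables in MLLI SRAM and, for non
 * single-pass flows, folds the data table entries into the assoc table
 * entry count.
 */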
static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
				 LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/* Inplace case dst nents equal to src nents */
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
						  curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
			    DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->dst.sram_addr =
					areq_ctx->src.sram_addr +
					areq_ctx->src.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->src.sram_addr =
					areq_ctx->dst.sram_addr +
					areq_ctx->dst.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}

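/**
 * cc_map_aead_request() - DMA-map all buffers of an AEAD request
 *
 * @drvdata: Driver private data
 * @req: AEAD request
 *
 * Maps the MAC buffer, the CCM/GCM helper blocks, the IV and the src/dst SG
 * lists, then chains assoc, IV and data and generates the MLLI table(s) for
 * the single-pass or double-pass flow. Undone by cc_unmap_aead_request(),
 * which is also called here on failure.
 *
 * Return: 0 on success, negative error code otherwise.
 */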
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	int rc = 0;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy the MAC to a temporary location to deal with a possible
	 * data memory overwrite caused by cache coherence problems.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for the cipher: remove the ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + req->assoclen;
	/* If we do in-place encryption, we also need the auth tag */
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
	    (req->src == req->dst)) {
		size_to_map += authsize;
	}

	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.mapped_nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 * (1) Assoc. data
		 * (2) Src/Dst SGLs
		 * Note: IV is a contiguous buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 * (1) MLLI table for assoc
		 * (2) IV entry (chained right after end of assoc)
		 * (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 * (1) MLLI table for assoc
		 * (2) IV entry (chained right after end of assoc)
		 * (3) MLLI for dst
		 * (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 * (1) MLLI table for assoc
		 * (2) IV entry (chained right after end of assoc)
		 * (3) MLLI for src
		 * (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* MLLI support - start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}

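/**
 * cc_map_hash_request_final() - Map the data for a final hash operation
 *
 * @drvdata: Driver private data
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source SG list
 * @nbytes: Number of data bytes in @src
 * @do_update: True if the data in @src should be hashed as well
 * @flags: Allocation flags for the MLLI table
 *
 * Maps any buffered partial block plus the request data and builds the MLLI
 * table when more than one DMA entry is needed.
 *
 * Return: 0 on success, negative error code otherwise.
 */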
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/* map the previous buffer */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/* build mlli */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = drvdata->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

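/**
 * cc_map_hash_request_update() - Map the data for an update hash operation
 *
 * @drvdata: Driver private data
 * @ctx: Hash request context (struct ahash_req_ctx)
 * @src: Source SG list
 * @nbytes: Number of data bytes in @src
 * @block_size: Hash block size in bytes
 * @flags: Allocation flags for the MLLI table
 *
 * Buffers data smaller than one block, keeps the residue of a partial block
 * for the next update and maps the block-aligned part for DMA.
 *
 * Return: 0 if data was mapped, 1 if the data was only buffered, negative
 * error code otherwise.
 */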
dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n", 1240 curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]); 1241 areq_ctx->in_nents = sg_nents_for_len(src, nbytes); 1242 sg_copy_to_buffer(src, areq_ctx->in_nents, 1243 &curr_buff[*curr_buff_cnt], nbytes); 1244 *curr_buff_cnt += nbytes; 1245 return 1; 1246 } 1247 1248 /* Calculate the residue size*/ 1249 *next_buff_cnt = total_in_len & (block_size - 1); 1250 /* update data len */ 1251 update_data_len = total_in_len - *next_buff_cnt; 1252 1253 dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n", 1254 *next_buff_cnt, update_data_len); 1255 1256 /* Copy the new residue to next buffer */ 1257 if (*next_buff_cnt) { 1258 dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n", 1259 next_buff, (update_data_len - *curr_buff_cnt), 1260 *next_buff_cnt); 1261 cc_copy_sg_portion(dev, next_buff, src, 1262 (update_data_len - *curr_buff_cnt), 1263 nbytes, CC_SG_TO_BUF); 1264 /* change the buffer index for next operation */ 1265 swap_index = 1; 1266 } 1267 1268 if (*curr_buff_cnt) { 1269 rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, 1270 &sg_data); 1271 if (rc) 1272 return rc; 1273 /* change the buffer index for next operation */ 1274 swap_index = 1; 1275 } 1276 1277 if (update_data_len > *curr_buff_cnt) { 1278 rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), 1279 DMA_TO_DEVICE, &areq_ctx->in_nents, 1280 LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, 1281 &mapped_nents); 1282 if (rc) 1283 goto unmap_curr_buff; 1284 if (mapped_nents == 1 && 1285 areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { 1286 /* only one entry in the SG and no previous data */ 1287 memcpy(areq_ctx->buff_sg, src, 1288 sizeof(struct scatterlist)); 1289 areq_ctx->buff_sg->length = update_data_len; 1290 areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; 1291 areq_ctx->curr_sg = areq_ctx->buff_sg; 1292 } else { 1293 areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; 1294 } 1295 } 1296 1297 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { 1298 mlli_params->curr_pool = drvdata->mlli_buffs_pool; 1299 /* add the src data to the sg_data */ 1300 cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, 1301 (update_data_len - *curr_buff_cnt), 0, true, 1302 &areq_ctx->mlli_nents); 1303 rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); 1304 if (rc) 1305 goto fail_unmap_din; 1306 } 1307 areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index); 1308 1309 return 0; 1310 1311 fail_unmap_din: 1312 dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); 1313 1314 unmap_curr_buff: 1315 if (*curr_buff_cnt) 1316 dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); 1317 1318 return rc; 1319 } 1320 1321 void cc_unmap_hash_request(struct device *dev, void *ctx, 1322 struct scatterlist *src, bool do_revert) 1323 { 1324 struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; 1325 u32 *prev_len = cc_next_buf_cnt(areq_ctx); 1326 1327 /*In case a pool was set, a table was 1328 *allocated and should be released 1329 */ 1330 if (areq_ctx->mlli_params.curr_pool) { 1331 dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", 1332 &areq_ctx->mlli_params.mlli_dma_addr, 1333 areq_ctx->mlli_params.mlli_virt_addr); 1334 dma_pool_free(areq_ctx->mlli_params.curr_pool, 1335 areq_ctx->mlli_params.mlli_virt_addr, 1336 areq_ctx->mlli_params.mlli_dma_addr); 1337 } 1338 1339 if (src && areq_ctx->in_nents) { 1340 dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n", 1341 sg_virt(src), &sg_dma_address(src), 
void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/* In case a pool was set, a table was
	 * allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

int cc_buffer_mgr_init(struct cc_drvdata *drvdata)
{
	struct device *dev = drvdata_to_dev(drvdata);

	drvdata->mlli_buffs_pool =
		dma_pool_create("dx_single_mlli_tables", dev,
				MAX_NUM_OF_TOTAL_MLLI_ENTRIES *
				LLI_ENTRY_BYTE_SIZE,
				MLLI_TABLE_MIN_ALIGNMENT, 0);

	if (!drvdata->mlli_buffs_pool)
		return -ENOMEM;

	return 0;
}

int cc_buffer_mgr_fini(struct cc_drvdata *drvdata)
{
	dma_pool_destroy(drvdata->mlli_buffs_pool);
	return 0;
}