// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>

#include "cc_buffer_mgr.h"
#include "cc_lli_defs.h"
#include "cc_cipher.h"
#include "cc_hash.h"
#include "cc_aead.h"

enum dma_buffer_type {
	DMA_NULL_TYPE = -1,
	DMA_SGL_TYPE = 1,
	DMA_BUFF_TYPE = 2,
};

struct buff_mgr_handle {
	struct dma_pool *mlli_buffs_pool;
};

union buffer_array_entry {
	struct scatterlist *sgl;
	dma_addr_t buffer_dma;
};

struct buffer_array {
	unsigned int num_of_buffers;
	union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI];
	unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
	int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI];
	enum dma_buffer_type type[MAX_NUM_OF_BUFFERS_IN_MLLI];
	bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI];
	u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI];
};

static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type)
{
	switch (type) {
	case CC_DMA_BUF_NULL:
		return "BUF_NULL";
	case CC_DMA_BUF_DLLI:
		return "BUF_DLLI";
	case CC_DMA_BUF_MLLI:
		return "BUF_MLLI";
	default:
		return "BUF_INVALID";
	}
}

/**
 * cc_copy_mac() - Copy MAC to temporary location
 *
 * @dev: device object
 * @req: aead request object
 * @dir: [IN] copy from/to sgl
 */
static void cc_copy_mac(struct device *dev, struct aead_request *req,
			enum cc_sg_cpy_direct dir)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 skip = areq_ctx->assoclen + req->cryptlen;

	if (areq_ctx->is_gcm4543)
		skip += crypto_aead_ivsize(tfm);

	cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src,
			   (skip - areq_ctx->req_authsize), skip, dir);
}

/**
 * cc_get_sgl_nents() - Get scatterlist number of entries.
 *
 * @dev: device object
 * @sg_list: SG list
 * @nbytes: [IN] Total SGL data bytes.
 * @lbytes: [OUT] Returns the amount of bytes at the last entry
 */
static unsigned int cc_get_sgl_nents(struct device *dev,
				     struct scatterlist *sg_list,
				     unsigned int nbytes, u32 *lbytes)
{
	unsigned int nents = 0;

	*lbytes = 0;

	while (nbytes && sg_list) {
		nents++;
		/* get the number of bytes in the last entry */
		*lbytes = nbytes;
		nbytes -= (sg_list->length > nbytes) ?
				nbytes : sg_list->length;
		sg_list = sg_next(sg_list);
	}

	dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
	return nents;
}

/**
 * cc_copy_sg_portion() - Copy scatter list data,
 * from to_skip to end, to dest and vice versa
 *
 * @dev: device object
 * @dest: flat (contiguous) buffer to copy to or from
 * @sg: scatterlist to copy from or to
 * @to_skip: [IN] offset into the scatterlist data at which to start copying
 * @end: [IN] offset into the scatterlist data at which to stop copying
 * @direct: [IN] copy direction (SG to buffer or buffer to SG)
 */
void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
			u32 to_skip, u32 end, enum cc_sg_cpy_direct direct)
{
	u32 nents;

	nents = sg_nents_for_len(sg, end);
	sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
		       (direct == CC_SG_TO_BUF));
}

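/**
 * cc_render_buff_to_mlli() - Translate a contiguous DMA buffer into MLLI
 * entries, splitting it into chunks of at most CC_MAX_MLLI_ENTRY_SIZE.
 *
 * @dev: device object
 * @buff_dma: DMA address of the buffer
 * @buff_size: buffer size in bytes
 * @curr_nents: [IN/OUT] running count of MLLI entries written so far
 * @mlli_entry_pp: [IN/OUT] pointer to the next free MLLI entry
 *
 * Return: 0 on success, -ENOMEM if the MLLI table would overflow.
 */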
static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma,
				  u32 buff_size, u32 *curr_nents,
				  u32 **mlli_entry_pp)
{
	u32 *mlli_entry_p = *mlli_entry_pp;
	u32 new_nents;

	/* Verify there is no memory overflow*/
	new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1);
	if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) {
		dev_err(dev, "Too many mlli entries. current %d max %d\n",
			new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES);
		return -ENOMEM;
	}

	/*handle buffer longer than 64 kbytes */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		cc_lli_set_addr(mlli_entry_p, buff_dma);
		cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE);
		dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
			*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
			mlli_entry_p[LLI_WORD1_OFFSET]);
		buff_dma += CC_MAX_MLLI_ENTRY_SIZE;
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		mlli_entry_p = mlli_entry_p + 2;
		(*curr_nents)++;
	}
	/*Last entry */
	cc_lli_set_addr(mlli_entry_p, buff_dma);
	cc_lli_set_size(mlli_entry_p, buff_size);
	dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n",
		*curr_nents, mlli_entry_p[LLI_WORD0_OFFSET],
		mlli_entry_p[LLI_WORD1_OFFSET]);
	mlli_entry_p = mlli_entry_p + 2;
	*mlli_entry_pp = mlli_entry_p;
	(*curr_nents)++;
	return 0;
}

static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}

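/**
 * cc_generate_mlli() - Build a single MLLI table from the accumulated
 * buffer array, using the DMA pool pointed to by mlli_params->curr_pool.
 *
 * @dev: device object
 * @sg_data: buffer array describing all SGLs/buffers to link
 * @mlli_params: [OUT] receives the MLLI virtual/DMA addresses and length
 * @flags: gfp flags for the pool allocation
 *
 * Return: 0 on success, negative error code otherwise.
 */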
static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data,
			    struct mlli_params *mlli_params, gfp_t flags)
{
	u32 *mlli_p;
	u32 total_nents = 0, prev_total_nents = 0;
	int rc = 0, i;

	dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers);

	/* Allocate memory from the pointed pool */
	mlli_params->mlli_virt_addr =
		dma_pool_alloc(mlli_params->curr_pool, flags,
			       &mlli_params->mlli_dma_addr);
	if (!mlli_params->mlli_virt_addr) {
		dev_err(dev, "dma_pool_alloc() failed\n");
		rc = -ENOMEM;
		goto build_mlli_exit;
	}
	/* Point to start of MLLI */
	mlli_p = (u32 *)mlli_params->mlli_virt_addr;
	/* go over all SG's and link it to one MLLI table */
	for (i = 0; i < sg_data->num_of_buffers; i++) {
		union buffer_array_entry *entry = &sg_data->entry[i];
		u32 tot_len = sg_data->total_data_len[i];
		u32 offset = sg_data->offset[i];

		if (sg_data->type[i] == DMA_SGL_TYPE)
			rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len,
						  offset, &total_nents,
						  &mlli_p);
		else /*DMA_BUFF_TYPE*/
			rc = cc_render_buff_to_mlli(dev, entry->buffer_dma,
						    tot_len, &total_nents,
						    &mlli_p);
		if (rc)
			return rc;

		/* set last bit in the current table */
		if (sg_data->mlli_nents[i]) {
			/*Calculate the current MLLI table length for the
			 *length field in the descriptor
			 */
			*sg_data->mlli_nents[i] +=
				(total_nents - prev_total_nents);
			prev_total_nents = total_nents;
		}
	}

	/* Set MLLI size for the bypass operation */
	mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE);

	dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n",
		mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr,
		mlli_params->mlli_len);

build_mlli_exit:
	return rc;
}

static void cc_add_buffer_entry(struct device *dev,
				struct buffer_array *sgl_data,
				dma_addr_t buffer_dma, unsigned int buffer_len,
				bool is_last_entry, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u single_buff=%pad buffer_len=0x%08X is_last=%d\n",
		index, &buffer_dma, buffer_len, is_last_entry);
	sgl_data->nents[index] = 1;
	sgl_data->entry[index].buffer_dma = buffer_dma;
	sgl_data->offset[index] = 0;
	sgl_data->total_data_len[index] = buffer_len;
	sgl_data->type[index] = DMA_BUFF_TYPE;
	sgl_data->is_last[index] = is_last_entry;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
			    unsigned int nents, struct scatterlist *sgl,
			    unsigned int data_len, unsigned int data_offset,
			    bool is_last_table, u32 *mlli_nents)
{
	unsigned int index = sgl_data->num_of_buffers;

	dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n",
		index, nents, sgl, data_len, is_last_table);
	sgl_data->nents[index] = nents;
	sgl_data->entry[index].sgl = sgl;
	sgl_data->offset[index] = data_offset;
	sgl_data->total_data_len[index] = data_len;
	sgl_data->type[index] = DMA_SGL_TYPE;
	sgl_data->is_last[index] = is_last_table;
	sgl_data->mlli_nents[index] = mlli_nents;
	if (sgl_data->mlli_nents[index])
		*sgl_data->mlli_nents[index] = 0;
	sgl_data->num_of_buffers++;
}

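/**
 * cc_map_sg() - Count and DMA-map the entries of a scatterlist.
 *
 * @dev: device object
 * @sg: scatterlist to map
 * @nbytes: number of data bytes covered by the mapping
 * @direction: DMA direction
 * @nents: [OUT] number of entries spanning nbytes
 * @max_sg_nents: maximum number of entries allowed for this mapping
 * @lbytes: [OUT] number of bytes in the last entry
 * @mapped_nents: [OUT] number of entries actually mapped
 *
 * Return: 0 on success, -ENOMEM on too many fragments or mapping failure.
 */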
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	int ret = 0;

	*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
	if (*nents > max_sg_nents) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			*nents, max_sg_nents);
		*nents = 0;
		return -ENOMEM;
	}

	ret = dma_map_sg(dev, sg, *nents, direction);
	if (!ret) {
		*nents = 0;
		dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret);
		return -ENOMEM;
	}

	*mapped_nents = ret;

	return 0;
}

static int
cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
		     u8 *config_data, struct buffer_array *sg_data,
		     unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}

static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx,
			   u8 *curr_buff, u32 curr_buff_cnt,
			   struct buffer_array *sg_data)
{
	dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt);
	/* create sg for the current buffer */
	sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt);
	if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() src buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg),
		sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset,
		areq_ctx->buff_sg->length);
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
	areq_ctx->curr_sg = areq_ctx->buff_sg;
	areq_ctx->in_nents = 0;
	/* prepare for case of MLLI */
	cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0,
			false, NULL);
	return 0;
}

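/**
 * cc_unmap_cipher_request() - Release the DMA resources taken by
 * cc_map_cipher_request(): the IV mapping, the MLLI table (if one was
 * allocated) and the src/dst SGL mappings.
 *
 * @dev: device object
 * @ctx: skcipher request context (struct cipher_req_ctx)
 * @ivsize: size of the IV that was mapped
 * @src: source scatterlist
 * @dst: destination scatterlist
 */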
void cc_unmap_cipher_request(struct device *dev, void *ctx,
			     unsigned int ivsize, struct scatterlist *src,
			     struct scatterlist *dst)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;

	if (req_ctx->gen_ctx.iv_dma_addr) {
		dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n",
			&req_ctx->gen_ctx.iv_dma_addr, ivsize);
		dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr,
				 ivsize, DMA_BIDIRECTIONAL);
	}
	/* Release pool */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI &&
	    req_ctx->mlli_params.mlli_virt_addr) {
		dma_pool_free(req_ctx->mlli_params.curr_pool,
			      req_ctx->mlli_params.mlli_virt_addr,
			      req_ctx->mlli_params.mlli_dma_addr);
	}

	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));

	if (src != dst) {
		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
	}
}

int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
			  unsigned int ivsize, unsigned int nbytes,
			  void *info, struct scatterlist *src,
			  struct scatterlist *dst, gfp_t flags)
{
	struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx;
	struct mlli_params *mlli_params = &req_ctx->mlli_params;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	u32 dummy = 0;
	int rc = 0;
	u32 mapped_nents = 0;

	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* Map IV buffer */
	if (ivsize) {
		dump_byte_array("iv", (u8 *)info, ivsize);
		req_ctx->gen_ctx.iv_dma_addr =
			dma_map_single(dev, (void *)info,
				       ivsize, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) {
			dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
				ivsize, info);
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
			ivsize, info, &req_ctx->gen_ctx.iv_dma_addr);
	} else {
		req_ctx->gen_ctx.iv_dma_addr = 0;
	}

	/* Map the src SGL */
	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
	if (rc)
		goto cipher_exit;
	if (mapped_nents > 1)
		req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

	if (src == dst) {
		/* Handle inplace operation */
		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			req_ctx->out_nents = 0;
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
		}
	} else {
		/* Map the dst sg */
		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto cipher_exit;
		if (mapped_nents > 1)
			req_ctx->dma_buf_type = CC_DMA_BUF_MLLI;

		if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
			cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src,
					nbytes, 0, true,
					&req_ctx->in_mlli_nents);
			cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst,
					nbytes, 0, true,
					&req_ctx->out_mlli_nents);
		}
	}

	if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto cipher_exit;
	}

	dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n",
		cc_dma_buf_type(req_ctx->dma_buf_type));

	return 0;

cipher_exit:
	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	return rc;
}

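/**
 * cc_unmap_aead_request() - Release all DMA mappings taken by
 * cc_map_aead_request() (MAC buffer, GCM/CCM helper blocks, IV, MLLI
 * table and the src/dst SGLs), and restore the MAC from its backup
 * location when needed on coherent platforms.
 *
 * @dev: device object
 * @req: aead request object
 */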
void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct cc_drvdata *drvdata = dev_get_drvdata(dev);

	if (areq_ctx->mac_buf_dma_addr) {
		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
				 MAX_MAC_SIZE, DMA_BIDIRECTIONAL);
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		if (areq_ctx->hkey_dma_addr) {
			dma_unmap_single(dev, areq_ctx->hkey_dma_addr,
					 AES_BLOCK_SIZE, DMA_BIDIRECTIONAL);
		}

		if (areq_ctx->gcm_block_len_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc1_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		if (areq_ctx->gcm_iv_inc2_dma_addr) {
			dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}
	}

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if (areq_ctx->ccm_iv0_dma_addr) {
			dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr,
					 AES_BLOCK_SIZE, DMA_TO_DEVICE);
		}

		dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE);
	}
	if (areq_ctx->gen_ctx.iv_dma_addr) {
		dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr,
				 hw_iv_size, DMA_BIDIRECTIONAL);
		kzfree(areq_ctx->gen_ctx.iv);
	}

	/* Release pool */
	if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) &&
	    (areq_ctx->mlli_params.mlli_virt_addr)) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
		areq_ctx->assoclen, req->cryptlen);

	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
		     DMA_BIDIRECTIONAL);
	if (req->src != req->dst) {
		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
			sg_virt(req->dst));
		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
			     DMA_BIDIRECTIONAL);
	}
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst) {
		/* copy back mac from temporary location to deal with possible
		 * data memory overwrite caused by a cache coherence problem.
		 */
		cc_copy_mac(dev, req, CC_SG_FROM_BUF);
	}
}

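/* The ICV is considered fragmented when the data spans more than one SG
 * entry and the last entry holds fewer bytes than the full ICV (authsize).
 */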
static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize,
			   u32 last_entry_data_size)
{
	return ((sgl_nents > 1) && (last_entry_data_size < authsize));
}

static int cc_aead_chain_iv(struct cc_drvdata *drvdata,
			    struct aead_request *req,
			    struct buffer_array *sg_data,
			    bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
	struct device *dev = drvdata_to_dev(drvdata);
	gfp_t flags = cc_gfp_flags(&req->base);
	int rc = 0;

	if (!req->iv) {
		areq_ctx->gen_ctx.iv_dma_addr = 0;
		areq_ctx->gen_ctx.iv = NULL;
		goto chain_iv_exit;
	}

	areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags);
	if (!areq_ctx->gen_ctx.iv)
		return -ENOMEM;

	areq_ctx->gen_ctx.iv_dma_addr =
		dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size,
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) {
		dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n",
			hw_iv_size, req->iv);
		kzfree(areq_ctx->gen_ctx.iv);
		areq_ctx->gen_ctx.iv = NULL;
		rc = -ENOMEM;
		goto chain_iv_exit;
	}

	dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n",
		hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr);
	// TODO: what about CTR?? ask Ron
	if (do_chain && areq_ctx->plaintext_authenticate_only) {
		struct crypto_aead *tfm = crypto_aead_reqtfm(req);
		unsigned int iv_size_to_authenc = crypto_aead_ivsize(tfm);
		unsigned int iv_ofs = GCM_BLOCK_RFC4_IV_OFFSET;
		/* Chain to given list */
		cc_add_buffer_entry(dev, sg_data,
				    (areq_ctx->gen_ctx.iv_dma_addr + iv_ofs),
				    iv_size_to_authenc, is_last,
				    &areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_iv_exit:
	return rc;
}

static int cc_aead_chain_assoc(struct cc_drvdata *drvdata,
			       struct aead_request *req,
			       struct buffer_array *sg_data,
			       bool is_last, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = 0;
	int mapped_nents = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	unsigned int size_of_assoc = areq_ctx->assoclen;
	struct device *dev = drvdata_to_dev(drvdata);

	if (areq_ctx->is_gcm4543)
		size_of_assoc += crypto_aead_ivsize(tfm);

	if (!sg_data) {
		rc = -EINVAL;
		goto chain_assoc_exit;
	}

	if (areq_ctx->assoclen == 0) {
		areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL;
		areq_ctx->assoc.nents = 0;
		areq_ctx->assoc.mlli_nents = 0;
		dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		goto chain_assoc_exit;
	}

	mapped_nents = sg_nents_for_len(req->src, size_of_assoc);
	if (mapped_nents < 0)
		return mapped_nents;

	if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->assoc.nents = mapped_nents;

	/* in CCM case we have additional entry for
	 * ccm header configurations
	 */
	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) {
			dev_err(dev, "CCM case. Too many fragments. Current %d max %d\n",
				(areq_ctx->assoc.nents + 1),
				LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES);
			rc = -ENOMEM;
			goto chain_assoc_exit;
		}
	}

	if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null)
		areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI;
	else
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;

	if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n",
			cc_dma_buf_type(areq_ctx->assoc_buff_type),
			areq_ctx->assoc.nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src,
				areq_ctx->assoclen, 0, is_last,
				&areq_ctx->assoc.mlli_nents);
		areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI;
	}

chain_assoc_exit:
	return rc;
}

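/**
 * cc_prepare_aead_data_dlli() - Set the ICV (MAC) location for the DLLI
 * case, where the data is a single contiguous DMA entry and the ICV sits
 * in the last bytes of src (in-place or decrypt) or dst (non-inplace
 * encrypt).
 *
 * @req: aead request object
 * @src_last_bytes: [IN] number of bytes in the last src SG entry
 * @dst_last_bytes: [IN] number of bytes in the last dst SG entry
 */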
static void cc_prepare_aead_data_dlli(struct aead_request *req,
				      u32 *src_last_bytes, u32 *dst_last_bytes)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct scatterlist *sg;
	ssize_t offset;

	areq_ctx->is_icv_fragmented = false;

	if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		sg = areq_ctx->src_sgl;
		offset = *src_last_bytes - authsize;
	} else {
		sg = areq_ctx->dst_sgl;
		offset = *dst_last_bytes - authsize;
	}

	areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset;
	areq_ctx->icv_virt_addr = sg_virt(sg) + offset;
}

static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata,
				      struct aead_request *req,
				      struct buffer_array *sg_data,
				      u32 *src_last_bytes, u32 *dst_last_bytes,
				      bool is_last_table)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	struct device *dev = drvdata_to_dev(drvdata);
	struct scatterlist *sg;

	if (req->src == req->dst) {
		/*INPLACE*/
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);

		if (areq_ctx->is_icv_fragmented) {
			/* Backup happens only when ICV is fragmented, ICV
			 * verification is made by CPU compare in order to
			 * simplify MAC verification upon request completion
			 */
			if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
				/* In coherent platforms (e.g. ACP)
				 * already copying ICV for any
				 * INPLACE-DECRYPT operation, hence
				 * we must neglect this code.
				 */
				if (!drvdata->coherent)
					cc_copy_mac(dev, req, CC_SG_TO_BUF);

				areq_ctx->icv_virt_addr = areq_ctx->backup_mac;
			} else {
				areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
				areq_ctx->icv_dma_addr =
					areq_ctx->mac_buf_dma_addr;
			}
		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/*Should handle if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) {
		/*NON-INPLACE and DECRYPT*/
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->src.nents, authsize,
				       *src_last_bytes);
		/* Backup happens only when ICV is fragmented, ICV
		 * verification is made by CPU compare in order to simplify
		 * MAC verification upon request completion
		 */
		if (areq_ctx->is_icv_fragmented) {
			cc_copy_mac(dev, req, CC_SG_TO_BUF);
			areq_ctx->icv_virt_addr = areq_ctx->backup_mac;

		} else { /* Contig. ICV */
			sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1];
			/*Should handle if the sg is not contig.*/
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*src_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*src_last_bytes - authsize);
		}

	} else {
		/*NON-INPLACE and ENCRYPT*/
		cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents,
				areq_ctx->dst_sgl, areq_ctx->cryptlen,
				areq_ctx->dst_offset, is_last_table,
				&areq_ctx->dst.mlli_nents);
		cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents,
				areq_ctx->src_sgl, areq_ctx->cryptlen,
				areq_ctx->src_offset, is_last_table,
				&areq_ctx->src.mlli_nents);

		areq_ctx->is_icv_fragmented =
			cc_is_icv_frag(areq_ctx->dst.nents, authsize,
				       *dst_last_bytes);

		if (!areq_ctx->is_icv_fragmented) {
			sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1];
			/* Contig. ICV */
			areq_ctx->icv_dma_addr = sg_dma_address(sg) +
				(*dst_last_bytes - authsize);
			areq_ctx->icv_virt_addr = sg_virt(sg) +
				(*dst_last_bytes - authsize);
		} else {
			areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr;
			areq_ctx->icv_virt_addr = areq_ctx->mac_buf;
		}
	}
}

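/**
 * cc_aead_chain_data() - Add the src/dst data (past the assoc data) to the
 * buffer array and decide between DLLI and MLLI for the data, setting the
 * ICV location accordingly.
 *
 * @drvdata: driver private data
 * @req: aead request object
 * @sg_data: buffer array to chain the data into
 * @is_last_table: mark the added entries as closing the MLLI table
 * @do_chain: force the MLLI data path even for a single data fragment
 *
 * Return: 0 on success, negative error code otherwise.
 */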
static int cc_aead_chain_data(struct cc_drvdata *drvdata,
			      struct aead_request *req,
			      struct buffer_array *sg_data,
			      bool is_last_table, bool do_chain)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(drvdata);
	enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type;
	unsigned int authsize = areq_ctx->req_authsize;
	unsigned int src_last_bytes = 0, dst_last_bytes = 0;
	int rc = 0;
	u32 src_mapped_nents = 0, dst_mapped_nents = 0;
	u32 offset = 0;
	/* non-inplace mode */
	unsigned int size_for_map = areq_ctx->assoclen + req->cryptlen;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	u32 sg_index = 0;
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	u32 size_to_skip = areq_ctx->assoclen;
	struct scatterlist *sgl;

	if (is_gcm4543)
		size_to_skip += crypto_aead_ivsize(tfm);

	offset = size_to_skip;

	if (!sg_data)
		return -EINVAL;

	areq_ctx->src_sgl = req->src;
	areq_ctx->dst_sgl = req->dst;

	if (is_gcm4543)
		size_for_map += crypto_aead_ivsize(tfm);

	size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			authsize : 0;
	src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
					    &src_last_bytes);
	sg_index = areq_ctx->src_sgl->length;
	//check where the data starts
	while (src_mapped_nents && (sg_index <= size_to_skip)) {
		src_mapped_nents--;
		offset -= areq_ctx->src_sgl->length;
		sgl = sg_next(areq_ctx->src_sgl);
		if (!sgl)
			break;
		areq_ctx->src_sgl = sgl;
		sg_index += areq_ctx->src_sgl->length;
	}
	if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}

	areq_ctx->src.nents = src_mapped_nents;

	areq_ctx->src_offset = offset;

	if (req->src != req->dst) {
		size_for_map = areq_ctx->assoclen + req->cryptlen;

		if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT)
			size_for_map += authsize;
		else
			size_for_map -= authsize;

		if (is_gcm4543)
			size_for_map += crypto_aead_ivsize(tfm);

		rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
			       &areq_ctx->dst.mapped_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
			       &dst_mapped_nents);
		if (rc)
			goto chain_data_exit;
	}

	dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
					    &dst_last_bytes);
	sg_index = areq_ctx->dst_sgl->length;
	offset = size_to_skip;

	//check where the data starts
	while (dst_mapped_nents && sg_index <= size_to_skip) {
		dst_mapped_nents--;
		offset -= areq_ctx->dst_sgl->length;
		sgl = sg_next(areq_ctx->dst_sgl);
		if (!sgl)
			break;
		areq_ctx->dst_sgl = sgl;
		sg_index += areq_ctx->dst_sgl->length;
	}
	if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) {
		dev_err(dev, "Too many fragments. current %d max %d\n",
			dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES);
		return -ENOMEM;
	}
	areq_ctx->dst.nents = dst_mapped_nents;
	areq_ctx->dst_offset = offset;
	if (src_mapped_nents > 1 ||
	    dst_mapped_nents > 1 ||
	    do_chain) {
		areq_ctx->data_buff_type = CC_DMA_BUF_MLLI;
		cc_prepare_aead_data_mlli(drvdata, req, sg_data,
					  &src_last_bytes, &dst_last_bytes,
					  is_last_table);
	} else {
		areq_ctx->data_buff_type = CC_DMA_BUF_DLLI;
		cc_prepare_aead_data_dlli(req, &src_last_bytes,
					  &dst_last_bytes);
	}

chain_data_exit:
	return rc;
}

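/**
 * cc_update_aead_mlli_nents() - Compute the SRAM addresses at which the
 * assoc/src/dst MLLI tables will be loaded and, for the double-pass flow,
 * fold the data table entry counts into the assoc table count.
 *
 * @drvdata: driver private data
 * @req: aead request object
 */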
static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata,
				      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	u32 curr_mlli_size = 0;

	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) {
		areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr;
		curr_mlli_size = areq_ctx->assoc.mlli_nents *
				 LLI_ENTRY_BYTE_SIZE;
	}

	if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		/*Inplace case dst nents equal to src nents*/
		if (req->src == req->dst) {
			areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents;
			areq_ctx->src.sram_addr = drvdata->mlli_sram_addr +
						  curr_mlli_size;
			areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr;
			if (!areq_ctx->is_single_pass)
				areq_ctx->assoc.mlli_nents +=
					areq_ctx->src.mlli_nents;
		} else {
			if (areq_ctx->gen_ctx.op_type ==
			    DRV_CRYPTO_DIRECTION_DECRYPT) {
				areq_ctx->src.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->dst.sram_addr =
					areq_ctx->src.sram_addr +
					areq_ctx->src.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->src.mlli_nents;
			} else {
				areq_ctx->dst.sram_addr =
					drvdata->mlli_sram_addr +
					curr_mlli_size;
				areq_ctx->src.sram_addr =
					areq_ctx->dst.sram_addr +
					areq_ctx->dst.mlli_nents *
					LLI_ENTRY_BYTE_SIZE;
				if (!areq_ctx->is_single_pass)
					areq_ctx->assoc.mlli_nents +=
						areq_ctx->dst.mlli_nents;
			}
		}
	}
}

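/**
 * cc_map_aead_request() - Map all DMA buffers needed to process an AEAD
 * request: the MAC buffer, CCM/GCM helper blocks, the IV and the src/dst
 * scatterlists, then build the MLLI table(s) when any of them need MLLI.
 *
 * @drvdata: driver private data
 * @req: aead request object
 *
 * Return: 0 on success, negative error code otherwise. On failure, all
 * mappings taken so far are released via cc_unmap_aead_request().
 */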
int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct device *dev = drvdata_to_dev(drvdata);
	struct buffer_array sg_data;
	unsigned int authsize = areq_ctx->req_authsize;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	bool is_gcm4543 = areq_ctx->is_gcm4543;
	dma_addr_t dma_addr;
	u32 mapped_nents = 0;
	u32 dummy = 0; /* used for the assoc data fragments */
	u32 size_to_map = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;

	/* copy mac to a temporary location to deal with possible
	 * data memory overwrite caused by a cache coherence problem.
	 */
	if (drvdata->coherent &&
	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->src == req->dst)
		cc_copy_mac(dev, req, CC_SG_TO_BUF);

	/* calculate the size for cipher; remove ICV in decrypt */
	areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type ==
			      DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			     req->cryptlen :
			     (req->cryptlen - authsize);

	dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE,
				  DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
			MAX_MAC_SIZE, areq_ctx->mac_buf);
		rc = -ENOMEM;
		goto aead_map_failure;
	}
	areq_ctx->mac_buf_dma_addr = dma_addr;

	if (areq_ctx->ccm_hdr_size != ccm_header_size_null) {
		void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;

		dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE,
					  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, addr);
			areq_ctx->ccm_iv0_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->ccm_iv0_dma_addr = dma_addr;

		rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config,
					  &sg_data, areq_ctx->assoclen);
		if (rc)
			goto aead_map_failure;
	}

	if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) {
		dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE,
					  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, areq_ctx->hkey);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->hkey_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, &areq_ctx->gcm_len_block);
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_block_len_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1));
			areq_ctx->gcm_iv_inc1_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc1_dma_addr = dma_addr;

		dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2,
					  AES_BLOCK_SIZE, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma_addr)) {
			dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n",
				AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2));
			areq_ctx->gcm_iv_inc2_dma_addr = 0;
			rc = -ENOMEM;
			goto aead_map_failure;
		}
		areq_ctx->gcm_iv_inc2_dma_addr = dma_addr;
	}

	size_to_map = req->cryptlen + areq_ctx->assoclen;
	/* If we do in-place encryption, we also need the auth tag */
	if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) &&
	    (req->src == req->dst)) {
		size_to_map += authsize;
	}
	if (is_gcm4543)
		size_to_map += crypto_aead_ivsize(tfm);
	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
		       &areq_ctx->src.mapped_nents,
		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
			LLI_MAX_NUM_OF_DATA_ENTRIES),
		       &dummy, &mapped_nents);
	if (rc)
		goto aead_map_failure;

	if (areq_ctx->is_single_pass) {
		/*
		 * Create MLLI table for:
		 * (1) Assoc. data
		 * (2) Src/Dst SGLs
		 * Note: IV is contig. buffer (not an SGL)
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false);
		if (rc)
			goto aead_map_failure;
	} else { /* DOUBLE-PASS flow */
		/*
		 * Prepare MLLI table(s) in this order:
		 *
		 * If ENCRYPT/DECRYPT (inplace):
		 * (1) MLLI table for assoc
		 * (2) IV entry (chained right after end of assoc)
		 * (3) MLLI for src/dst (inplace operation)
		 *
		 * If ENCRYPT (non-inplace)
		 * (1) MLLI table for assoc
		 * (2) IV entry (chained right after end of assoc)
		 * (3) MLLI for dst
		 * (4) MLLI for src
		 *
		 * If DECRYPT (non-inplace)
		 * (1) MLLI table for assoc
		 * (2) IV entry (chained right after end of assoc)
		 * (3) MLLI for src
		 * (4) MLLI for dst
		 */
		rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true);
		if (rc)
			goto aead_map_failure;
		rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true);
		if (rc)
			goto aead_map_failure;
	}

	/* MLLI support - start building the MLLI according to the above
	 * results
	 */
	if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto aead_map_failure;

		cc_update_aead_mlli_nents(drvdata, req);
		dev_dbg(dev, "assoc params mn %d\n",
			areq_ctx->assoc.mlli_nents);
		dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents);
		dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents);
	}
	return 0;

aead_map_failure:
	cc_unmap_aead_request(dev, req);
	return rc;
}

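/**
 * cc_map_hash_request_final() - Map the data for a final/finup hash
 * operation: the previously buffered partial block (if any) and the new
 * src data, building an MLLI table when more than one entry is involved.
 *
 * @drvdata: driver private data
 * @ctx: ahash request context (struct ahash_req_ctx)
 * @src: source scatterlist
 * @nbytes: number of bytes to hash from src
 * @do_update: whether the src data should be included in this operation
 * @flags: gfp flags for memory allocations
 *
 * Return: 0 on success, negative error code otherwise.
 */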
int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx,
			      struct scatterlist *src, unsigned int nbytes,
			      bool do_update, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (nbytes == 0 && *curr_buff_cnt == 0) {
		/* nothing to do */
		return 0;
	}

	/*TODO: copy data in case that buffer is enough for operation */
	/* map the previous buffer */
	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
	}

	if (src && nbytes > 0 && do_update) {
		rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE,
			       &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
			       &dummy, &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (src && mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = nbytes;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	/*build mlli */
	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes,
				0, true, &areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	/* change the buffer index for the unmap function */
	areq_ctx->buff_index = (areq_ctx->buff_index ^ 1);
	dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n",
		cc_dma_buf_type(areq_ctx->data_dma_buf_type));
	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

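/**
 * cc_map_hash_request_update() - Map the data for a hash update operation.
 * Data that does not fill a complete hash block is copied aside into the
 * next buffer; only whole blocks are mapped for processing.
 *
 * @drvdata: driver private data
 * @ctx: ahash request context (struct ahash_req_ctx)
 * @src: source scatterlist
 * @nbytes: number of new data bytes in src
 * @block_size: hash block size
 * @flags: gfp flags for memory allocations
 *
 * Return: 0 when data was mapped for processing, 1 when all the data was
 * buffered (less than one block), negative error code otherwise.
 */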
int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
			       struct scatterlist *src, unsigned int nbytes,
			       unsigned int block_size, gfp_t flags)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	struct device *dev = drvdata_to_dev(drvdata);
	u8 *curr_buff = cc_hash_buf(areq_ctx);
	u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx);
	u8 *next_buff = cc_next_buf(areq_ctx);
	u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx);
	struct mlli_params *mlli_params = &areq_ctx->mlli_params;
	unsigned int update_data_len;
	u32 total_in_len = nbytes + *curr_buff_cnt;
	struct buffer_array sg_data;
	struct buff_mgr_handle *buff_mgr = drvdata->buff_mgr_handle;
	unsigned int swap_index = 0;
	int rc = 0;
	u32 dummy = 0;
	u32 mapped_nents = 0;

	dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n",
		curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index);
	/* Init the type of the dma buffer */
	areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL;
	mlli_params->curr_pool = NULL;
	areq_ctx->curr_sg = NULL;
	sg_data.num_of_buffers = 0;
	areq_ctx->in_nents = 0;

	if (total_in_len < block_size) {
		dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
			curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
		areq_ctx->in_nents = sg_nents_for_len(src, nbytes);
		sg_copy_to_buffer(src, areq_ctx->in_nents,
				  &curr_buff[*curr_buff_cnt], nbytes);
		*curr_buff_cnt += nbytes;
		return 1;
	}

	/* Calculate the residue size*/
	*next_buff_cnt = total_in_len & (block_size - 1);
	/* update data len */
	update_data_len = total_in_len - *next_buff_cnt;

	dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n",
		*next_buff_cnt, update_data_len);

	/* Copy the new residue to next buffer */
	if (*next_buff_cnt) {
		dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n",
			next_buff, (update_data_len - *curr_buff_cnt),
			*next_buff_cnt);
		cc_copy_sg_portion(dev, next_buff, src,
				   (update_data_len - *curr_buff_cnt),
				   nbytes, CC_SG_TO_BUF);
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (*curr_buff_cnt) {
		rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt,
				     &sg_data);
		if (rc)
			return rc;
		/* change the buffer index for next operation */
		swap_index = 1;
	}

	if (update_data_len > *curr_buff_cnt) {
		rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt),
			       DMA_TO_DEVICE, &areq_ctx->in_nents,
			       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy,
			       &mapped_nents);
		if (rc)
			goto unmap_curr_buff;
		if (mapped_nents == 1 &&
		    areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			/* only one entry in the SG and no previous data */
			memcpy(areq_ctx->buff_sg, src,
			       sizeof(struct scatterlist));
			areq_ctx->buff_sg->length = update_data_len;
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI;
			areq_ctx->curr_sg = areq_ctx->buff_sg;
		} else {
			areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI;
		}
	}

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) {
		mlli_params->curr_pool = buff_mgr->mlli_buffs_pool;
		/* add the src data to the sg_data */
		cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src,
				(update_data_len - *curr_buff_cnt), 0, true,
				&areq_ctx->mlli_nents);
		rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags);
		if (rc)
			goto fail_unmap_din;
	}
	areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index);

	return 0;

fail_unmap_din:
	dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE);

unmap_curr_buff:
	if (*curr_buff_cnt)
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);

	return rc;
}

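/**
 * cc_unmap_hash_request() - Release the DMA resources taken by
 * cc_map_hash_request_final()/cc_map_hash_request_update().
 *
 * @dev: device object
 * @ctx: ahash request context (struct ahash_req_ctx)
 * @src: source scatterlist that was mapped
 * @do_revert: true when the operation is being rolled back, in which case
 *	the buffer index is restored instead of clearing the saved length
 */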
void cc_unmap_hash_request(struct device *dev, void *ctx,
			   struct scatterlist *src, bool do_revert)
{
	struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx;
	u32 *prev_len = cc_next_buf_cnt(areq_ctx);

	/*In case a pool was set, a table was
	 *allocated and should be released
	 */
	if (areq_ctx->mlli_params.curr_pool) {
		dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n",
			&areq_ctx->mlli_params.mlli_dma_addr,
			areq_ctx->mlli_params.mlli_virt_addr);
		dma_pool_free(areq_ctx->mlli_params.curr_pool,
			      areq_ctx->mlli_params.mlli_virt_addr,
			      areq_ctx->mlli_params.mlli_dma_addr);
	}

	if (src && areq_ctx->in_nents) {
		dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n",
			sg_virt(src), &sg_dma_address(src), sg_dma_len(src));
		dma_unmap_sg(dev, src,
			     areq_ctx->in_nents, DMA_TO_DEVICE);
	}

	if (*prev_len) {
		dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n",
			sg_virt(areq_ctx->buff_sg),
			&sg_dma_address(areq_ctx->buff_sg),
			sg_dma_len(areq_ctx->buff_sg));
		dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE);
		if (!do_revert) {
			/* clean the previous data length for update
			 * operation
			 */
			*prev_len = 0;
		} else {
			areq_ctx->buff_index ^= 1;
		}
	}
}

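/**
 * cc_buffer_mgr_init() - Allocate the buffer manager handle and the DMA
 * pool used for MLLI tables. Called at driver init; the matching
 * cc_buffer_mgr_fini() releases the pool and the handle.
 *
 * @drvdata: driver private data
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */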
dma_pool_create("dx_single_mlli_tables", dev, 1450 MAX_NUM_OF_TOTAL_MLLI_ENTRIES * 1451 LLI_ENTRY_BYTE_SIZE, 1452 MLLI_TABLE_MIN_ALIGNMENT, 0); 1453 1454 if (!buff_mgr_handle->mlli_buffs_pool) 1455 goto error; 1456 1457 return 0; 1458 1459 error: 1460 cc_buffer_mgr_fini(drvdata); 1461 return -ENOMEM; 1462 } 1463 1464 int cc_buffer_mgr_fini(struct cc_drvdata *drvdata) 1465 { 1466 struct buff_mgr_handle *buff_mgr_handle = drvdata->buff_mgr_handle; 1467 1468 if (buff_mgr_handle) { 1469 dma_pool_destroy(buff_mgr_handle->mlli_buffs_pool); 1470 kfree(drvdata->buff_mgr_handle); 1471 drvdata->buff_mgr_handle = NULL; 1472 } 1473 return 0; 1474 } 1475