Lines Matching +full:crypto +full:- +full:engine

/* SPDX-License-Identifier: GPL-2.0 */
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <linux/dma-direction.h>
 * in Errata 4.12. It looks like it was part of an IRQ-controller in FPGA
 * /-----------\ 0
 * |-----------| 0x20
 * |-----------| 0x40
 * |-----------| 0x40 (inplace)
 * |-----------| 0x80
 * |  DATA IN  | 16 * x (max ->max_req_size)
 * |-----------| 0x80 (inplace operation)
 * |  DATA OUT | 16 * x (max ->max_req_size)
 * \-----------/ SRAM size
 *
 * /-----------\ 0
 * |-----------| 0x20
 * |-----------| 0x40
 * |-----------| 0x60
 * |-----------| 0x80
 * |  DATA IN  | 64 * x (max ->max_req_size)
 * \-----------/ SRAM size
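/*
 * Worked example tied to the two SRAM maps above. Assumptions (not taken
 * from this listing): a 2 KiB engine SRAM, as the CESA_SA_SRAM_MSK value
 * below hints at, and the DATA area starting at 0x80 as drawn. The
 * EXAMPLE_* names are placeholders, not the driver's own macros.
 */
#define EXAMPLE_SRAM_SIZE		2048	/* assumed SRAM size per engine */
#define EXAMPLE_DATA_SRAM_OFFSET	0x80	/* start of DATA IN/OUT, per the maps */
#define EXAMPLE_SRAM_PAYLOAD_SIZE	(EXAMPLE_SRAM_SIZE - \
					 EXAMPLE_DATA_SRAM_OFFSET)	/* 1920 bytes per sub-op */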
#define CESA_SA_SRAM_PAYLOAD_SIZE	(cesa_dev->sram_size - \
#define CESA_SA_SRAM_MSK		(2048 - 1)
#define CESA_HASH_BLOCK_SIZE_MSK	(CESA_MAX_HASH_BLOCK_SIZE - 1)
 * struct mv_cesa_sec_accel_desc - security accelerator descriptor
 * @config: engine config
 * Structure passed to the CESA engine to describe the crypto operation
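/*
 * Rough sketch of the fields such a descriptor carries, inferred from the
 * op->desc.* accessors further down in this listing (config, enc_p,
 * enc_len, enc_key_p, enc_iv, mac_src_p, mac_digest, mac_iv). Field order
 * and exact bit layout are assumptions, not the authoritative definition;
 * the cpu_to_le32() conversions below suggest little-endian fields.
 */
struct example_sec_accel_desc {
	__le32 config;		/* engine config word (see @config above) */
	__le32 enc_p;		/* cipher data pointer(s) in SRAM */
	__le32 enc_len;		/* cipher length */
	__le32 enc_key_p;	/* cipher key pointer in SRAM */
	__le32 enc_iv;		/* cipher IV pointer(s) in SRAM */
	__le32 mac_src_p;	/* MAC data pointer + total length */
	__le32 mac_digest;	/* MAC digest pointer + fragment length */
	__le32 mac_iv;		/* MAC inner/outer IV pointer(s) in SRAM */
};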
 * struct mv_cesa_skcipher_op_ctx - cipher operation context
 * struct mv_cesa_hash_op_ctx - hash or hmac operation context
 * struct mv_cesa_op_ctx - crypto operation context
 * @ctx: context associated with the crypto operation
 * Context associated with a crypto operation.
 * struct mv_cesa_tdma_desc - TDMA descriptor
 * TDMA descriptor used to create a transfer chain describing a crypto operation.
 * struct mv_cesa_sg_dma_iter - scatter-gather iterator
 * @op_offset: current position in the crypto operation
 * a crypto operation.
 * struct mv_cesa_dma_iter - crypto operation iterator
 * @len: the crypto operation length
 * @offset: current position in the crypto operation
 * @op_len: sub-operation length (the crypto engine can only act on 2kb
 * Iterator used to create a TDMA chain for a given crypto operation.
 * struct mv_cesa_tdma_chain - TDMA chain
 * Stores a TDMA chain for a specific crypto operation.
 * struct mv_cesa_caps - CESA device capabilities
 * struct mv_cesa_dev_dma - DMA pools
 * @op_pool: crypto operation pool
 * struct mv_cesa_dev - CESA device
 * struct mv_cesa_engine - CESA engine
 * @id: engine id
 * @regs: engine registers
 * @lock: engine lock
 * @req: current crypto request
 * @clk: engine clk
 * @zclk: engine zclk
 * @queue: fifo of the pending crypto requests
 * @load: engine load counter, useful for load balancing
 * @complete_queue: fifo of requests already processed by the engine
 * Structure storing CESA engine information.
 * struct mv_cesa_req_ops - CESA request operations
 * @process: return 0 once the engine is done processing the operation,
 *	     -EINPROGRESS if it needs more steps, or an error code on failure
 * @step: launch the crypto operation on the next chunk
 * @cleanup: cleanup the crypto request (release associated data)
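/*
 * Sketch of how the callbacks documented above are meant to be driven,
 * e.g. from the engine's interrupt path: step() pushes the next chunk,
 * process() reports 0 when the request is done, -EINPROGRESS when more
 * chunks remain, or an error, and cleanup() releases per-request
 * resources. The callback signatures and the example_* names are
 * assumptions for illustration only.
 */
struct example_req_ops {
	void (*step)(struct crypto_async_request *req);
	int (*process)(struct crypto_async_request *req, u32 status);
	void (*cleanup)(struct crypto_async_request *req);
};

static void example_handle_step_done(const struct example_req_ops *ops,
				     struct crypto_async_request *req,
				     u32 status)
{
	int ret = ops->process(req, status);

	if (ret == -EINPROGRESS) {
		ops->step(req);		/* more chunks to run */
		return;
	}

	ops->cleanup(req);		/* done (0) or failed (error code) */
	/* ...the request would then be completed back to the crypto API. */
}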
 * struct mv_cesa_ctx - CESA operation context
 * @ops: crypto operations
 * struct mv_cesa_hash_ctx - CESA hash operation context
 * struct mv_cesa_hmac_ctx - CESA hmac operation context
 * enum mv_cesa_req_type - request type definitions
 * struct mv_cesa_req - CESA request
 * @engine: engine associated with this request
	struct mv_cesa_engine *engine;
 * struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard requests
 * struct mv_cesa_skcipher_std_req - cipher standard request
 * @size: size of the crypto operation
 * struct mv_cesa_skcipher_req - cipher request
 * struct mv_cesa_ahash_std_req - standard hash request
 * struct mv_cesa_ahash_dma_req - DMA hash request
 * struct mv_cesa_ahash_req - hash request
mv_cesa_engine_enqueue_complete_request(struct mv_cesa_engine *engine,
	list_add_tail(&req->list, &engine->complete_queue);
mv_cesa_engine_dequeue_complete_request(struct mv_cesa_engine *engine)
	req = list_first_entry_or_null(&engine->complete_queue,
	list_del(&req->list);
	return req->chain.first ? CESA_DMA_REQ : CESA_STD_REQ;	/* in mv_cesa_req_get_type() */
	op->desc.config &= cpu_to_le32(~mask);	/* in mv_cesa_update_op_cfg() */
	op->desc.config |= cpu_to_le32(cfg);	/* in mv_cesa_update_op_cfg() */
	return le32_to_cpu(op->desc.config);	/* in mv_cesa_get_op_cfg() */
	op->desc.config = cpu_to_le32(cfg);	/* in mv_cesa_set_op_cfg() */
static inline void mv_cesa_adjust_op(struct mv_cesa_engine *engine,
	u32 offset = engine->sram_dma & CESA_SA_SRAM_MSK;
	op->desc.enc_p = CESA_SA_DESC_CRYPT_DATA(offset);
	op->desc.enc_key_p = CESA_SA_DESC_CRYPT_KEY(offset);
	op->desc.enc_iv = CESA_SA_DESC_CRYPT_IV(offset);
	op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_DATA_MSK;
	op->desc.mac_src_p |= CESA_SA_DESC_MAC_DATA(offset);
	op->desc.mac_digest &= ~CESA_SA_DESC_MAC_DIGEST_MSK;
	op->desc.mac_digest |= CESA_SA_DESC_MAC_DIGEST(offset);
	op->desc.mac_iv = CESA_SA_DESC_MAC_IV(offset);
	op->desc.enc_len = cpu_to_le32(len);			/* in mv_cesa_set_crypt_op_len() */
	op->desc.mac_src_p &= ~CESA_SA_DESC_MAC_TOTAL_LEN_MSK;	/* in mv_cesa_set_mac_op_total_len() */
	op->desc.mac_src_p |= CESA_SA_DESC_MAC_TOTAL_LEN(len);	/* in mv_cesa_set_mac_op_total_len() */
	op->desc.mac_digest &= ~CESA_SA_DESC_MAC_FRAG_LEN_MSK;	/* in mv_cesa_set_mac_op_frag_len() */
	op->desc.mac_digest |= CESA_SA_DESC_MAC_FRAG_LEN(len);	/* in mv_cesa_set_mac_op_frag_len() */
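/*
 * Usage sketch for the length/config helpers above: programming one hash
 * fragment. The helpers are the ones shown in this listing; their first
 * parameter being a struct mv_cesa_op_ctx * is an assumption based on the
 * op->desc accesses, and example_setup_hash_fragment() itself is not part
 * of the driver.
 */
static inline void example_setup_hash_fragment(struct mv_cesa_op_ctx *op,
					       u32 cfg, int total_len,
					       int frag_len)
{
	mv_cesa_set_op_cfg(op, cfg);			/* overwrite the config word */
	mv_cesa_set_mac_op_total_len(op, total_len);	/* whole message length */
	mv_cesa_set_mac_op_frag_len(op, frag_len);	/* bytes in this fragment */
}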
static inline void mv_cesa_set_int_mask(struct mv_cesa_engine *engine,
	if (int_mask == engine->int_mask)
	writel_relaxed(int_mask, engine->regs + CESA_SA_INT_MSK);
	engine->int_mask = int_mask;
static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
	return engine->int_mask;
mv_cesa_dequeue_req_locked(struct mv_cesa_engine *engine,
	/* in mv_cesa_select_engine() */
	for (i = 0; i < cesa_dev->caps->nengines; i++) {
		struct mv_cesa_engine *engine = cesa_dev->engines + i;
		u32 load = atomic_read(&engine->load);
			selected = engine;
	atomic_add(weight, &selected->load);
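/*
 * The fragments above outline the load-balancing policy: walk every
 * engine, keep the one with the smallest atomic load counter, then charge
 * the new request's weight to the winner. Self-contained sketch of that
 * pattern; the driver's actual function may differ in details.
 */
static struct mv_cesa_engine *example_select_engine(int weight)
{
	struct mv_cesa_engine *selected = NULL;
	u32 min_load = U32_MAX;
	int i;

	for (i = 0; i < cesa_dev->caps->nengines; i++) {
		struct mv_cesa_engine *engine = cesa_dev->engines + i;
		u32 load = atomic_read(&engine->load);

		if (load < min_load) {
			min_load = load;
			selected = engine;
		}
	}

	atomic_add(weight, &selected->load);

	return selected;
}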
 * Helper function that indicates whether a crypto request needs to be cleaned up.
	/* in mv_cesa_req_needs_cleanup() */
	if (ret == -EINPROGRESS)
	if (ret == -EBUSY)
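/*
 * Sketch of the decision those two tests implement: a request accepted
 * into the queue (-EINPROGRESS) or pushed to the backlog (-EBUSY, assuming
 * the request carries CRYPTO_TFM_REQ_MAY_BACKLOG) is still owned by the
 * engine and must not be cleaned up yet; any other return value means it
 * was never queued.
 */
static inline bool example_req_needs_cleanup(int ret)
{
	if (ret == -EINPROGRESS)
		return false;	/* queued normally, the engine owns it */
	if (ret == -EBUSY)
		return false;	/* backlogged, will be processed later */
	return true;		/* never queued: the caller must release it */
}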
	/* in mv_cesa_req_dma_iter_init() */
	iter->len = len;
	iter->op_len = min(len, CESA_SA_SRAM_PAYLOAD_SIZE);
	iter->offset = 0;

	/* in mv_cesa_sg_dma_iter_init() */
	iter->op_offset = 0;
	iter->offset = 0;
	iter->sg = sg;
	iter->dir = dir;
	/* in mv_cesa_req_dma_iter_transfer_len() */
	return min(iter->op_len - sgiter->op_offset,
		   sg_dma_len(sgiter->sg) - sgiter->offset);

	/* in mv_cesa_req_dma_iter_next_op() */
	iter->offset += iter->op_len;
	iter->op_len = min(iter->len - iter->offset,
	return iter->op_len;
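/*
 * Sketch of how the iterator helpers above fit together when walking a
 * request: iterate sub-operations bounded by CESA_SA_SRAM_PAYLOAD_SIZE and,
 * within each one, size transfers against the current scatterlist entry.
 * The helper parameter order is assumed from the fragments, and the
 * sg-advancing step (a helper not shown in this listing) is left as a
 * comment, so this is an outline rather than working chain-building code.
 */
static void example_walk_request(unsigned int len, struct scatterlist *sg,
				 enum dma_data_direction dir)
{
	struct mv_cesa_dma_iter iter;
	struct mv_cesa_sg_dma_iter sgiter;

	mv_cesa_req_dma_iter_init(&iter, len);		/* whole request */
	mv_cesa_sg_dma_iter_init(&sgiter, sg, dir);	/* its (DMA-mapped) sg list */

	do {
		/* Largest piece usable now: limited by both the remaining
		 * sub-operation bytes and the current sg entry. */
		unsigned int chunk =
			mv_cesa_req_dma_iter_transfer_len(&iter, &sgiter);

		/* ...queue a TDMA transfer of 'chunk' bytes, advance the sg
		 * iterator until the sub-op is covered, then queue the
		 * crypto operation for this sub-op... */
		(void)chunk;
	} while (mv_cesa_req_dma_iter_next_op(&iter));
}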
	/* in mv_cesa_dma_process() */
	return -EINPROGRESS;
	return -EINVAL;
			struct mv_cesa_engine *engine);
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status);
size_t mv_cesa_sg_copy(struct mv_cesa_engine *engine,
static inline size_t mv_cesa_sg_copy_to_sram(struct mv_cesa_engine *engine,
	return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
static inline size_t mv_cesa_sg_copy_from_sram(struct mv_cesa_engine *engine,
	return mv_cesa_sg_copy(engine, sgl, nents, sram_off, buflen, skip,
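/*
 * For requests handled without TDMA, data is bounced through the engine
 * SRAM with the two wrappers above. Usage sketch; the parameter types are
 * inferred from the wrapper bodies (sgl, nents, sram_off, buflen, skip)
 * and the 0x80 data offset comes from the SRAM maps near the top of this
 * listing, so treat both as assumptions.
 */
static size_t example_std_copy(struct mv_cesa_engine *engine,
			       struct scatterlist *sgl, unsigned int nents,
			       size_t buflen, off_t skip)
{
	unsigned int sram_off = 0x80;	/* DATA area, per the SRAM maps */
	size_t copied;

	/* Stage the input payload into the engine's SRAM... */
	copied = mv_cesa_sg_copy_to_sram(engine, sgl, nents, sram_off,
					 buflen, skip);

	/* ...run the operation, then pull the result back out in place. */
	mv_cesa_sg_copy_from_sram(engine, sgl, nents, sram_off, copied, skip);

	return copied;
}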