/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

/* \file cc_driver.h
 * ARM CryptoCell Linux Crypto Driver
 */

#ifndef __CC_DRIVER_H__
#define __CC_DRIVER_H__

#ifdef COMP_IN_WQ
#include <linux/workqueue.h>
#else
#include <linux/interrupt.h>
#endif
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/version.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

/* Registers definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
#define CC_DEV_SHA_MAX 512
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"

extern bool cc_dump_desc;
extern bool cc_dump_bytes;

#define DRV_MODULE_VERSION "4.0"

enum cc_hw_rev {
	CC_HW_REV_630 = 630,
	CC_HW_REV_710 = 710,
	CC_HW_REV_712 = 712
};

#define CC_COHERENT_CACHE_PARAMS 0xEEE

/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48

#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
			 (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))

#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)

#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)

#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT, \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)

/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET

/* TEE FIPS status interrupt */
#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)

#define CC_CRA_PRIO 400

#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */

#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080
#define MAX_ICV_NENTS_SUPPORTED 2

/* Definitions for HW descriptors DIN/DOUT fields */
#define NS_BIT 1
#define AXI_ID 0
/* AXI_ID is not actually the AXI ID of the transaction but the value of
 * the AXI_ID field in the HW descriptor. The DMA engine adds 8 to that
 * value.
 */

#define CC_MAX_IVGEN_DMA_ADDRESSES	3
struct cc_crypto_req {
	void (*user_cb)(struct device *dev, void *req, int err);
	void *user_arg;
	dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
	/* For the first 'ivgen_dma_addr_len' addresses of this array,
	 * the generated IV is placed there by send_request().
	 * The same generated IV is used for all addresses!
	 */
	/* Number of 'ivgen_dma_addr' elements to be filled. */
	unsigned int ivgen_dma_addr_len;
	/* Size of the required generated IV; 8 or 16 bytes allowed. */
	unsigned int ivgen_size;
	struct completion seq_compl; /* request completion */
};
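/*
 * Illustrative sketch only (not part of the driver): a completion
 * handler matching the user_cb prototype in struct cc_crypto_req
 * above. The my_cipher_complete() name and the skcipher request type
 * are assumptions for documentation purposes; the real handlers live
 * in the cipher, hash and aead units of this driver.
 *
 *	static void my_cipher_complete(struct device *dev, void *req, int err)
 *	{
 *		struct skcipher_request *sk_req = req;
 *
 *		(unmap DMA buffers, copy back the IV, etc.)
 *		skcipher_request_complete(sk_req, err);
 *	}
 */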
/**
 * struct cc_drvdata - driver private data context
 * @cc_base:	virt address of the CC registers
 * @irq:	device IRQ number
 * @irq_mask:	Interrupt mask shadow (1 for masked interrupts)
 * @fw_ver:	SeP loaded firmware version
 * @hw_queue_avail: wait for HW queue availability
 * @plat_dev:	platform device this driver is bound to
 * @mlli_sram_addr: SRAM address reserved for MLLI tables
 * @buff_mgr_handle: buffer manager unit context
 * @cipher_handle: cipher unit context
 * @hash_handle: hash unit context
 * @aead_handle: AEAD unit context
 * @request_mgr_handle: request manager unit context
 * @fips_handle: FIPS unit context
 * @ivgen_handle: IV generator unit context
 * @sram_mgr_handle: SRAM manager unit context
 * @debugfs:	debugfs entry of this device
 * @clk:	device clock
 * @coherent:	true if the device is DMA (cache) coherent
 * @hw_rev_name: HW revision name string
 * @hw_rev:	HW revision
 * @hash_len_sz: size (in bytes) of the hash-length field used by this
 *		 HW revision
 * @axim_mon_offset: offset of the AXIM monitor register
 * @sig_offset:	offset of the HW signature register
 * @ver_offset:	offset of the HW version register
 */
struct cc_drvdata {
	void __iomem *cc_base;
	int irq;
	u32 irq_mask;
	u32 fw_ver;
	struct completion hw_queue_avail; /* wait for HW queue availability */
	struct platform_device *plat_dev;
	cc_sram_addr_t mlli_sram_addr;
	void *buff_mgr_handle;
	void *cipher_handle;
	void *hash_handle;
	void *aead_handle;
	void *request_mgr_handle;
	void *fips_handle;
	void *ivgen_handle;
	void *sram_mgr_handle;
	void *debugfs;
	struct clk *clk;
	bool coherent;
	char *hw_rev_name;
	enum cc_hw_rev hw_rev;
	u32 hash_len_sz;
	u32 axim_mon_offset;
	u32 sig_offset;
	u32 ver_offset;
};

struct cc_crypto_alg {
	struct list_head entry;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	unsigned int data_unit;
	struct cc_drvdata *drvdata;
	struct skcipher_alg skcipher_alg;
	struct aead_alg aead_alg;
};

struct cc_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	union {
		struct skcipher_alg skcipher;
		struct aead_alg aead;
	} template_u;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	u32 min_hw_rev;
	unsigned int data_unit;
	struct cc_drvdata *drvdata;
};

struct async_gen_req_ctx {
	dma_addr_t iv_dma_addr;
	enum drv_crypto_direction op_type;
};

static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
{
	return &drvdata->plat_dev->dev;
}

void __dump_byte_array(const char *name, const u8 *buf, size_t len);
static inline void dump_byte_array(const char *name, const u8 *the_array,
				   size_t size)
{
	if (cc_dump_bytes)
		__dump_byte_array(name, the_array, size);
}

int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
void cc_clk_off(struct cc_drvdata *drvdata);

static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
	iowrite32(val, (drvdata->cc_base + reg));
}

static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
{
	return ioread32(drvdata->cc_base + reg);
}

static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
{
	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
}

static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
				      struct cc_hw_desc *pdesc)
{
	if (drvdata->hw_rev >= CC_HW_REV_712)
		set_queue_last_ind_bit(pdesc);
}

#endif /*__CC_DRIVER_H__*/
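/*
 * Illustrative sketch only (documentation comment, deliberately outside
 * the include guard): driving the register accessors above through the
 * CC_REG() name-mangling macro. This assumes cc_host_regs.h defines
 * CC_HOST_IRR_REG_OFFSET and CC_HOST_IMR_REG_OFFSET, which is what
 * CC_REG(HOST_IRR) and CC_REG(HOST_IMR) expand to; substitute the
 * register names your HW revision actually provides.
 *
 *	u32 irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
 *
 *	drvdata->irq_mask |= CC_COMP_IRQ_MASK;
 *	cc_iowrite(drvdata, CC_REG(HOST_IMR), drvdata->irq_mask);
 */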