/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

/* \file cc_driver.h
 * ARM CryptoCell Linux Crypto Driver
 */

#ifndef __CC_DRIVER_H__
#define __CC_DRIVER_H__

#ifdef COMP_IN_WQ
#include <linux/workqueue.h>
#else
#include <linux/interrupt.h>
#endif
#include <linux/dma-mapping.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/version.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

/* Register definitions from shared/hw/ree_include */
#include "cc_host_regs.h"
#define CC_DEV_SHA_MAX 512
#include "cc_crypto_ctx.h"
#include "cc_hw_queue_defs.h"
#include "cc_sram_mgr.h"

extern bool cc_dump_desc;
extern bool cc_dump_bytes;

#define DRV_MODULE_VERSION "5.0"

enum cc_hw_rev {
	CC_HW_REV_630 = 630,
	CC_HW_REV_710 = 710,
	CC_HW_REV_712 = 712,
	CC_HW_REV_713 = 713
};

enum cc_std_body {
	CC_STD_NIST = 0x1,
	CC_STD_OSCCA = 0x2,
	CC_STD_ALL = 0x3
};

#define CC_COHERENT_CACHE_PARAMS 0xEEE

/* Maximum DMA mask supported by IP */
#define DMA_BIT_MASK_LEN 48
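/*
 * Illustrative only (a sketch, not part of this header): this length is
 * intended to be consumed as DMA_BIT_MASK(DMA_BIT_MASK_LEN) when the
 * probe code configures the device DMA mask, e.g.:
 *
 *	rc = dma_set_coherent_mask(dev, DMA_BIT_MASK(DMA_BIT_MASK_LEN));
 */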

#define CC_AXI_IRQ_MASK ((1 << CC_AXIM_CFG_BRESPMASK_BIT_SHIFT) | \
			  (1 << CC_AXIM_CFG_RRESPMASK_BIT_SHIFT) | \
			  (1 << CC_AXIM_CFG_INFLTMASK_BIT_SHIFT) | \
			  (1 << CC_AXIM_CFG_COMPMASK_BIT_SHIFT))

#define CC_AXI_ERR_IRQ_MASK BIT(CC_HOST_IRR_AXI_ERR_INT_BIT_SHIFT)

#define CC_COMP_IRQ_MASK BIT(CC_HOST_IRR_AXIM_COMP_INT_BIT_SHIFT)

#define CC_SECURITY_DISABLED_MASK BIT(CC_SECURITY_DISABLED_VALUE_BIT_SHIFT)

#define AXIM_MON_COMP_VALUE GENMASK(CC_AXIM_MON_COMP_VALUE_BIT_SIZE + \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT - 1, \
				    CC_AXIM_MON_COMP_VALUE_BIT_SHIFT)

#define CC_CPP_AES_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_AES_7_MASK_BIT_SHIFT))

#define CC_CPP_SM4_ABORT_MASK ( \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_0_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_1_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_2_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_3_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_4_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_5_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_6_MASK_BIT_SHIFT) | \
	BIT(CC_HOST_IMR_REE_OP_ABORTED_SM_7_MASK_BIT_SHIFT))

/* Register name mangling macro */
#define CC_REG(reg_name) CC_ ## reg_name ## _REG_OFFSET
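/*
 * For example, CC_REG(HOST_IRR) expands to CC_HOST_IRR_REG_OFFSET, the
 * offset of the host interrupt request register defined in cc_host_regs.h.
 */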

/* TEE FIPS status interrupt */
#define CC_GPR0_IRQ_MASK BIT(CC_HOST_IRR_GPR0_BIT_SHIFT)

#define CC_CRA_PRIO 400

#define MIN_HW_QUEUE_SIZE 50 /* Minimum size required for proper function */

#define MAX_REQUEST_QUEUE_SIZE 4096
#define MAX_MLLI_BUFF_SIZE 2080

/* Definitions for HW descriptor DIN/DOUT fields */
#define NS_BIT 1
#define AXI_ID 0
/* AXI_ID is not actually the AXI ID of the transaction but the value of the
 * AXI_ID field in the HW descriptor. The DMA engine adds 8 to that value.
 */
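
/*
 * Illustrative use of NS_BIT when building a HW descriptor (a sketch;
 * the descriptor helpers live in cc_hw_queue_defs.h, and 'desc',
 * 'src_dma_addr' and 'data_len' are hypothetical locals):
 *
 *	set_din_type(&desc, DMA_DLLI, src_dma_addr, data_len, NS_BIT);
 */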

struct cc_cpp_req {
	bool is_cpp;
	enum cc_cpp_alg alg;
	u8 slot;
};

#define CC_MAX_IVGEN_DMA_ADDRESSES	3
struct cc_crypto_req {
	void (*user_cb)(struct device *dev, void *req, int err);
	void *user_arg;
	dma_addr_t ivgen_dma_addr[CC_MAX_IVGEN_DMA_ADDRESSES];
	/* send_request() writes the generated IV to each of the first
	 * 'ivgen_dma_addr_len' addresses of this array.
	 * The same generated IV is used for all of them!
	 */
	/* Number of 'ivgen_dma_addr' elements to be filled. */
	unsigned int ivgen_dma_addr_len;
	/* Required size of the generated IV; 8 or 16 bytes are allowed. */
	unsigned int ivgen_size;
	struct completion seq_compl; /* request completion */
	struct cc_cpp_req cpp;
};
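
/*
 * Minimal illustrative setup of a cc_crypto_req before it is queued (a
 * sketch, not part of the original header; 'cc_example_init_req' and its
 * parameters are hypothetical):
 */
static inline void cc_example_init_req(struct cc_crypto_req *cc_req,
				       void (*cb)(struct device *dev,
						  void *req, int err),
				       void *arg)
{
	*cc_req = (struct cc_crypto_req){};	/* zero all fields */
	cc_req->user_cb = cb;	/* invoked when the HW sequence completes */
	cc_req->user_arg = arg;	/* handed back to the callback as 'req' */
}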

/**
 * struct cc_drvdata - driver private data context
 * @cc_base:	virt address of the CC registers
 * @irq:	device IRQ number
 * @irq_mask:	Interrupt mask shadow (1 for masked interrupts)
 * @hw_queue_avail:	wait for HW queue availability
 * @plat_dev:	platform device this instance is bound to
 * @mlli_sram_addr:	SRAM area reserved for MLLI tables
 * @buff_mgr_handle:	buffer manager private context
 * @cipher_handle:	cipher service private context
 * @hash_handle:	hash service private context
 * @aead_handle:	AEAD service private context
 * @request_mgr_handle:	request manager private context
 * @fips_handle:	FIPS service private context
 * @ivgen_handle:	IV generator private context
 * @sram_mgr_handle:	SRAM manager private context
 * @debugfs:	opaque handle used by the debugfs code
 * @clk:	device clock
 * @coherent:	true if the device is DMA (cache) coherent
 * @hw_rev_name:	hardware revision name string
 * @hw_rev:	hardware revision (enum cc_hw_rev)
 * @axim_mon_offset:	offset of the AXIM monitor register
 * @sig_offset:	offset of the signature register
 * @ver_offset:	offset of the version register
 * @std_bodies:	bitmask of supported standards bodies (enum cc_std_body)
 * @sec_disabled:	true if security is disabled on this instance
 * @comp_mask:	interrupt bits treated as descriptor completion events
 */
struct cc_drvdata {
	void __iomem *cc_base;
	int irq;
	u32 irq_mask;
	struct completion hw_queue_avail;
	struct platform_device *plat_dev;
	cc_sram_addr_t mlli_sram_addr;
	void *buff_mgr_handle;
	void *cipher_handle;
	void *hash_handle;
	void *aead_handle;
	void *request_mgr_handle;
	void *fips_handle;
	void *ivgen_handle;
	void *sram_mgr_handle;
	void *debugfs;
	struct clk *clk;
	bool coherent;
	char *hw_rev_name;
	enum cc_hw_rev hw_rev;
	u32 axim_mon_offset;
	u32 sig_offset;
	u32 ver_offset;
	int std_bodies;
	bool sec_disabled;
	u32 comp_mask;
};

struct cc_crypto_alg {
	struct list_head entry;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	unsigned int data_unit;
	struct cc_drvdata *drvdata;
	struct skcipher_alg skcipher_alg;
	struct aead_alg aead_alg;
};

struct cc_alg_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	union {
		struct skcipher_alg skcipher;
		struct aead_alg aead;
	} template_u;
	int cipher_mode;
	int flow_mode; /* Note: currently, refers to the cipher mode only. */
	int auth_mode;
	u32 min_hw_rev;
	enum cc_std_body std_body;
	bool sec_func;
	unsigned int data_unit;
	struct cc_drvdata *drvdata;
};

struct async_gen_req_ctx {
	dma_addr_t iv_dma_addr;
	u8 *iv;
	enum drv_crypto_direction op_type;
};

static inline struct device *drvdata_to_dev(struct cc_drvdata *drvdata)
{
	return &drvdata->plat_dev->dev;
}
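
/*
 * Typical use (illustrative): obtain the struct device for logging or DMA
 * mapping, e.g. dev_err(drvdata_to_dev(drvdata), "HW queue is full\n");
 */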

void __dump_byte_array(const char *name, const u8 *buf, size_t len);
static inline void dump_byte_array(const char *name, const u8 *the_array,
				   size_t size)
{
	if (cc_dump_bytes)
		__dump_byte_array(name, the_array, size);
}

int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe);
void fini_cc_regs(struct cc_drvdata *drvdata);
int cc_clk_on(struct cc_drvdata *drvdata);
void cc_clk_off(struct cc_drvdata *drvdata);
unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata);

static inline void cc_iowrite(struct cc_drvdata *drvdata, u32 reg, u32 val)
{
	iowrite32(val, (drvdata->cc_base + reg));
}

static inline u32 cc_ioread(struct cc_drvdata *drvdata, u32 reg)
{
	return ioread32(drvdata->cc_base + reg);
}
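
/*
 * Illustrative sketch combining the register accessors with CC_REG()
 * (not part of the original header; 'cc_example_ack_comp_irq' is a
 * hypothetical helper): read the pending interrupts from HOST_IRR and
 * clear the completion bits via the interrupt clear register (HOST_ICR).
 */
static inline void cc_example_ack_comp_irq(struct cc_drvdata *drvdata)
{
	u32 irr = cc_ioread(drvdata, CC_REG(HOST_IRR));

	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr & CC_COMP_IRQ_MASK);
}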

static inline gfp_t cc_gfp_flags(struct crypto_async_request *req)
{
	return (req->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			GFP_KERNEL : GFP_ATOMIC;
}
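
/*
 * Typical use (illustrative): pick allocation flags based on whether the
 * crypto API request may sleep, e.g. in an skcipher entry point:
 *
 *	gfp_t flags = cc_gfp_flags(&req->base);
 *	iv = kzalloc(ivsize, flags);
 */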

/* Set the "queue last" indication bit, supported on HW rev 712 and above */
static inline void set_queue_last_ind(struct cc_drvdata *drvdata,
				      struct cc_hw_desc *pdesc)
{
	if (drvdata->hw_rev >= CC_HW_REV_712)
		set_queue_last_ind_bit(pdesc);
}

#endif /*__CC_DRIVER_H__*/