xref: /openbmc/linux/drivers/crypto/ccp/ccp-dev.h (revision 956ee21a6df08afd9c1c64e0f394a9a1b65e897d)
1 /*
2  * AMD Cryptographic Coprocessor (CCP) driver
3  *
4  * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
5  *
6  * Author: Tom Lendacky <thomas.lendacky@amd.com>
7  * Author: Gary R Hook <gary.hook@amd.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License version 2 as
11  * published by the Free Software Foundation.
12  */
13 
14 #ifndef __CCP_DEV_H__
15 #define __CCP_DEV_H__
16 
17 #include <linux/device.h>
18 #include <linux/pci.h>
19 #include <linux/spinlock.h>
20 #include <linux/mutex.h>
21 #include <linux/list.h>
22 #include <linux/wait.h>
23 #include <linux/dmapool.h>
24 #include <linux/hw_random.h>
25 #include <linux/bitops.h>
26 #include <linux/interrupt.h>
27 #include <linux/irqreturn.h>
28 #include <linux/dmaengine.h>
29 
30 #define MAX_CCP_NAME_LEN		16
31 #define MAX_DMAPOOL_NAME_LEN		32
32 
33 #define MAX_HW_QUEUES			5
34 #define MAX_CMD_QLEN			100
35 
36 #define TRNG_RETRIES			10
37 
38 #define CACHE_NONE			0x00
39 #define CACHE_WB_NO_ALLOC		0xb7
40 
41 /****** Register Mappings ******/
42 #define Q_MASK_REG			0x000
43 #define TRNG_OUT_REG			0x00c
44 #define IRQ_MASK_REG			0x040
45 #define IRQ_STATUS_REG			0x200
46 
47 #define DEL_CMD_Q_JOB			0x124
48 #define DEL_Q_ACTIVE			0x00000200
49 #define DEL_Q_ID_SHIFT			6
50 
51 #define CMD_REQ0			0x180
52 #define CMD_REQ_INCR			0x04
53 
54 #define CMD_Q_STATUS_BASE		0x210
55 #define CMD_Q_INT_STATUS_BASE		0x214
56 #define CMD_Q_STATUS_INCR		0x20
57 
58 #define CMD_Q_CACHE_BASE		0x228
59 #define CMD_Q_CACHE_INC			0x20
60 
61 #define CMD_Q_ERROR(__qs)		((__qs) & 0x0000003f)
62 #define CMD_Q_DEPTH(__qs)		(((__qs) >> 12) & 0x0000000f)
63 
64 /****** REQ0 Related Values ******/
65 #define REQ0_WAIT_FOR_WRITE		0x00000004
66 #define REQ0_INT_ON_COMPLETE		0x00000002
67 #define REQ0_STOP_ON_COMPLETE		0x00000001
68 
69 #define REQ0_CMD_Q_SHIFT		9
70 #define REQ0_JOBID_SHIFT		3
71 
72 /****** REQ1 Related Values ******/
73 #define REQ1_PROTECT_SHIFT		27
74 #define REQ1_ENGINE_SHIFT		23
75 #define REQ1_KEY_KSB_SHIFT		2
76 
77 #define REQ1_EOM			0x00000002
78 #define REQ1_INIT			0x00000001
79 
80 /* AES Related Values */
81 #define REQ1_AES_TYPE_SHIFT		21
82 #define REQ1_AES_MODE_SHIFT		18
83 #define REQ1_AES_ACTION_SHIFT		17
84 #define REQ1_AES_CFB_SIZE_SHIFT		10
85 
86 /* XTS-AES Related Values */
87 #define REQ1_XTS_AES_SIZE_SHIFT		10
88 
89 /* SHA Related Values */
90 #define REQ1_SHA_TYPE_SHIFT		21
91 
92 /* RSA Related Values */
93 #define REQ1_RSA_MOD_SIZE_SHIFT		10
94 
95 /* Pass-Through Related Values */
96 #define REQ1_PT_BW_SHIFT		12
97 #define REQ1_PT_BS_SHIFT		10
98 
99 /* ECC Related Values */
100 #define REQ1_ECC_AFFINE_CONVERT		0x00200000
101 #define REQ1_ECC_FUNCTION_SHIFT		18
102 
103 /****** REQ4 Related Values ******/
104 #define REQ4_KSB_SHIFT			18
105 #define REQ4_MEMTYPE_SHIFT		16
106 
107 /****** REQ6 Related Values ******/
108 #define REQ6_MEMTYPE_SHIFT		16
109 
110 /****** Key Storage Block ******/
111 #define KSB_START			77
112 #define KSB_END				127
113 #define KSB_COUNT			(KSB_END - KSB_START + 1)
114 #define CCP_SB_BITS			256
115 
116 #define CCP_JOBID_MASK			0x0000003f
117 
118 #define CCP_DMAPOOL_MAX_SIZE		64
119 #define CCP_DMAPOOL_ALIGN		BIT(5)
120 
121 #define CCP_REVERSE_BUF_SIZE		64
122 
123 #define CCP_AES_KEY_SB_COUNT		1
124 #define CCP_AES_CTX_SB_COUNT		1
125 
126 #define CCP_XTS_AES_KEY_SB_COUNT	1
127 #define CCP_XTS_AES_CTX_SB_COUNT	1
128 
129 #define CCP_SHA_SB_COUNT		1
130 
131 #define CCP_RSA_MAX_WIDTH		4096
132 
133 #define CCP_PASSTHRU_BLOCKSIZE		256
134 #define CCP_PASSTHRU_MASKSIZE		32
135 #define CCP_PASSTHRU_SB_COUNT		1
136 
137 #define CCP_ECC_MODULUS_BYTES		48      /* 384-bits */
138 #define CCP_ECC_MAX_OPERANDS		6
139 #define CCP_ECC_MAX_OUTPUTS		3
140 #define CCP_ECC_SRC_BUF_SIZE		448
141 #define CCP_ECC_DST_BUF_SIZE		192
142 #define CCP_ECC_OPERAND_SIZE		64
143 #define CCP_ECC_OUTPUT_SIZE		64
144 #define CCP_ECC_RESULT_OFFSET		60
145 #define CCP_ECC_RESULT_SUCCESS		0x0001
146 
147 #define CCP_SB_BYTES			32
148 
149 struct ccp_op;
150 
151 /* Structure for computation functions that are device-specific */
/* Structure for computation functions that are device-specific.
 * Each engine callback executes a single, fully-populated ccp_op;
 * init/destroy/irqhandler cover device lifetime and interrupts.
 */
struct ccp_actions {
	/* Per-engine operation entry points */
	int (*aes)(struct ccp_op *);
	int (*xts_aes)(struct ccp_op *);
	int (*sha)(struct ccp_op *);
	int (*rsa)(struct ccp_op *);
	int (*passthru)(struct ccp_op *);
	int (*ecc)(struct ccp_op *);
	/* Device setup / teardown */
	int (*init)(struct ccp_device *);
	void (*destroy)(struct ccp_device *);
	/* Interrupt handler registered for the device */
	irqreturn_t (*irqhandler)(int, void *);
};
163 
164 /* Structure to hold CCP version-specific values */
/* Structure to hold CCP version-specific values */
struct ccp_vdata {
	unsigned int version;			/* device version identifier */
	const struct ccp_actions *perform;	/* version-specific op table */
	const unsigned int bar;			/* BAR holding the register map */
	const unsigned int offset;		/* register offset within the BAR */
};
171 
172 extern struct ccp_vdata ccpv3;
173 
174 struct ccp_device;
175 struct ccp_cmd;
176 
/* Wrapper that allows a ccp_cmd to be linked onto a dmaengine
 * descriptor list (see ccp_dma_desc below).
 */
struct ccp_dma_cmd {
	struct list_head entry;		/* list linkage */

	struct ccp_cmd ccp_cmd;		/* the underlying CCP command */
};
182 
/* dmaengine transaction descriptor for the CCP's DMA capability */
struct ccp_dma_desc {
	struct list_head entry;		/* linkage on a channel list */

	struct ccp_device *ccp;		/* owning device */

	/* Command lists for this descriptor; presumably hold ccp_dma_cmd
	 * entries — confirm against ccp-dmaengine.c
	 */
	struct list_head pending;
	struct list_head active;

	enum dma_status status;		/* dmaengine completion status */
	struct dma_async_tx_descriptor tx_desc;	/* generic dmaengine descriptor */
	size_t len;			/* transfer length */
};
195 
/* Per-channel state for the CCP dmaengine provider */
struct ccp_dma_chan {
	struct ccp_device *ccp;		/* owning device */

	spinlock_t lock;		/* protects the descriptor lists below */
	struct list_head pending;	/* descriptors not yet submitted */
	struct list_head active;	/* descriptors in flight */
	struct list_head complete;	/* descriptors awaiting cleanup */

	struct tasklet_struct cleanup_tasklet;	/* deferred completion work */

	enum dma_status status;		/* channel status */
	struct dma_chan dma_chan;	/* generic dmaengine channel */
};
209 
/* Per-queue state for one of the CCP's hardware command queues.
 * Cacheline-aligned since each queue is serviced by its own kthread.
 */
struct ccp_cmd_queue {
	struct ccp_device *ccp;		/* owning device */

	/* Queue identifier */
	u32 id;

	/* Queue dma pool */
	struct dma_pool *dma_pool;

	/* Per-queue reserved storage block(s) */
	u32 sb_key;
	u32 sb_ctx;

	/* Queue processing thread */
	struct task_struct *kthread;
	unsigned int active;		/* non-zero while processing a cmd */
	unsigned int suspended;		/* non-zero when quiesced for suspend */

	/* Number of free command slots available */
	unsigned int free_slots;

	/* Interrupt masks */
	u32 int_ok;			/* "operation complete" interrupt bit */
	u32 int_err;			/* "error" interrupt bit */

	/* Register addresses for queue */
	void __iomem *reg_status;
	void __iomem *reg_int_status;

	/* Status values from job */
	u32 int_status;
	u32 q_status;
	u32 q_int_status;
	u32 cmd_error;

	/* Interrupt wait queue */
	wait_queue_head_t int_queue;
	unsigned int int_rcvd;		/* flag set when an interrupt arrives */
} ____cacheline_aligned;
249 
/* Per-device state for one CCP instance.  Fields are grouped (and
 * cacheline-aligned) by the context that accesses them.
 */
struct ccp_device {
	struct list_head entry;		/* linkage on the global device list */

	struct ccp_vdata *vdata;	/* version-specific data and op table */
	unsigned int ord;		/* device ordinal (unique per CCP) */
	char name[MAX_CCP_NAME_LEN];
	char rngname[MAX_CCP_NAME_LEN];	/* name registered with hw_random */

	struct device *dev;

	/* Bus specific device information
	 */
	void *dev_specific;
	int (*get_irq)(struct ccp_device *ccp);
	void (*free_irq)(struct ccp_device *ccp);
	unsigned int irq;

	/* I/O area used for device communication. The register mapping
	 * starts at an offset into the mapped bar.
	 *   The CMD_REQx registers and the Delete_Cmd_Queue_Job register
	 *   need to be protected while a command queue thread is accessing
	 *   them.
	 */
	struct mutex req_mutex ____cacheline_aligned;
	void __iomem *io_map;
	void __iomem *io_regs;

	/* Master lists that all cmds are queued on. Because there can be
	 * more than one CCP command queue that can process a cmd a separate
	 * backlog list is needed so that the backlog completion call
	 * completes before the cmd is available for execution.
	 */
	spinlock_t cmd_lock ____cacheline_aligned;
	unsigned int cmd_count;
	struct list_head cmd;
	struct list_head backlog;

	/* The command queues. These represent the queues available on the
	 * CCP that are available for processing cmds
	 */
	struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];
	unsigned int cmd_q_count;

	/* Support for the CCP True RNG
	 */
	struct hwrng hwrng;
	unsigned int hwrng_retries;	/* bounded by TRNG_RETRIES */

	/* Support for the CCP DMA capabilities
	 */
	struct dma_device dma_dev;
	struct ccp_dma_chan *ccp_dma_chan;
	struct kmem_cache *dma_cmd_cache;
	struct kmem_cache *dma_desc_cache;

	/* A counter used to generate job-ids for cmds submitted to the CCP
	 */
	atomic_t current_id ____cacheline_aligned;

	/* The CCP uses key storage blocks (KSB) to maintain context for certain
	 * operations. To prevent multiple cmds from using the same KSB range
	 * a command queue reserves a KSB range for the duration of the cmd.
	 * Each queue, will however, reserve 2 KSB blocks for operations that
	 * only require single KSB entries (eg. AES context/iv and key) in order
	 * to avoid allocation contention.  This will reserve at most 10 KSB
	 * entries, leaving 40 KSB entries available for dynamic allocation.
	 */
	struct mutex sb_mutex ____cacheline_aligned;
	DECLARE_BITMAP(sb, KSB_COUNT);	/* allocation bitmap, 1 bit per entry */
	wait_queue_head_t sb_queue;	/* waiters for KSB space to free up */
	unsigned int sb_avail;
	unsigned int sb_count;
	u32 sb_start;

	/* Suspend support */
	unsigned int suspending;
	wait_queue_head_t suspend_queue;

	/* DMA caching attribute support */
	unsigned int axcache;		/* e.g. CACHE_NONE, CACHE_WB_NO_ALLOC */
};
331 
/* Memory types an operation's source/destination may reside in */
enum ccp_memtype {
	CCP_MEMTYPE_SYSTEM = 0,	/* DMA-mapped system memory */
	CCP_MEMTYPE_SB,		/* key storage block entry */
	CCP_MEMTYPE_LOCAL,	/* device-local memory (see CMD_Q_CACHE_*) */
	CCP_MEMTYPE__LAST,	/* sentinel; keep last */
};
338 
/* A DMA-mapped region used as an operation source or destination */
struct ccp_dma_info {
	dma_addr_t address;		/* DMA (bus) address of the mapping */
	unsigned int offset;		/* offset into the mapping (see ccp_addr_lo/hi) */
	unsigned int length;		/* length of the mapping in bytes */
	enum dma_data_direction dir;	/* direction the mapping was made with */
};
345 
/* DMA-coherent work area, either pool-backed or directly allocated */
struct ccp_dm_workarea {
	struct device *dev;
	struct dma_pool *dma_pool;	/* pool the buffer came from, if any */
	unsigned int length;

	u8 *address;			/* CPU address of the buffer */
	struct ccp_dma_info dma;	/* device view of the buffer */
};
354 
/* Scatter-gather work area tracking progress through a mapped sg list */
struct ccp_sg_workarea {
	struct scatterlist *sg;		/* original scatterlist */
	int nents;			/* entries in @sg */

	struct scatterlist *dma_sg;	/* current position in the mapped list */
	struct device *dma_dev;		/* device the mapping was made for */
	unsigned int dma_count;		/* mapped entry count */
	enum dma_data_direction dma_dir;

	unsigned int sg_used;		/* bytes consumed of the current entry */

	u64 bytes_left;			/* bytes of the request still to process */
};
368 
/* Pairs a scatter-gather position with its bounce/work buffer */
struct ccp_data {
	struct ccp_sg_workarea sg_wa;	/* caller data position */
	struct ccp_dm_workarea dm_wa;	/* DMA-able staging buffer */
};
373 
/* Tagged union describing where an operand lives (see ccp_memtype) */
struct ccp_mem {
	enum ccp_memtype type;
	union {
		struct ccp_dma_info dma;	/* CCP_MEMTYPE_SYSTEM */
		u32 sb;				/* CCP_MEMTYPE_SB: KSB index */
	} u;
};
381 
/* AES engine parameters for a ccp_op */
struct ccp_aes_op {
	enum ccp_aes_type type;		/* key size selector */
	enum ccp_aes_mode mode;		/* e.g. ECB/CBC/... */
	enum ccp_aes_action action;	/* encrypt or decrypt */
};
387 
/* XTS-AES engine parameters for a ccp_op */
struct ccp_xts_aes_op {
	enum ccp_aes_action action;		/* encrypt or decrypt */
	enum ccp_xts_aes_unit_size unit_size;	/* data unit size selector */
};
392 
/* SHA engine parameters for a ccp_op */
struct ccp_sha_op {
	enum ccp_sha_type type;	/* hash algorithm selector */
	u64 msg_bits;		/* total message length in bits (for final block) */
};
397 
/* RSA engine parameters for a ccp_op */
struct ccp_rsa_op {
	u32 mod_size;	/* modulus size; max CCP_RSA_MAX_WIDTH bits */
	u32 input_len;	/* length of the input operand */
};
402 
/* Pass-through engine parameters for a ccp_op */
struct ccp_passthru_op {
	enum ccp_passthru_bitwise bit_mod;	/* bitwise operation to apply */
	enum ccp_passthru_byteswap byte_swap;	/* byte-swap mode to apply */
};
407 
/* ECC engine parameters for a ccp_op */
struct ccp_ecc_op {
	enum ccp_ecc_function function;	/* ECC operation selector */
};
411 
/* A single unit of work submitted to a CCP engine via ccp_actions */
struct ccp_op {
	struct ccp_cmd_queue *cmd_q;	/* queue executing this op */

	u32 jobid;		/* job id (masked with CCP_JOBID_MASK) */
	u32 ioc;		/* interrupt on completion */
	u32 soc;		/* stop on completion */
	u32 sb_key;		/* KSB entry holding the key */
	u32 sb_ctx;		/* KSB entry holding the context */
	u32 init;		/* first pass of a multi-part operation */
	u32 eom;		/* end of message (final pass) */

	struct ccp_mem src;	/* operation input */
	struct ccp_mem dst;	/* operation output */

	/* Engine-specific parameters; valid member depends on the engine */
	union {
		struct ccp_aes_op aes;
		struct ccp_xts_aes_op xts;
		struct ccp_sha_op sha;
		struct ccp_rsa_op rsa;
		struct ccp_passthru_op passthru;
		struct ccp_ecc_op ecc;
	} u;
};
435 
436 static inline u32 ccp_addr_lo(struct ccp_dma_info *info)
437 {
438 	return lower_32_bits(info->address + info->offset);
439 }
440 
441 static inline u32 ccp_addr_hi(struct ccp_dma_info *info)
442 {
443 	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
444 }
445 
446 int ccp_pci_init(void);
447 void ccp_pci_exit(void);
448 
449 int ccp_platform_init(void);
450 void ccp_platform_exit(void);
451 
452 void ccp_add_device(struct ccp_device *ccp);
453 void ccp_del_device(struct ccp_device *ccp);
454 
455 struct ccp_device *ccp_alloc_struct(struct device *dev);
456 bool ccp_queues_suspended(struct ccp_device *ccp);
457 int ccp_cmd_queue_thread(void *data);
458 
459 int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
460 
461 int ccp_dmaengine_register(struct ccp_device *ccp);
462 void ccp_dmaengine_unregister(struct ccp_device *ccp);
463 
464 #endif
465