xref: /openbmc/linux/drivers/crypto/hisilicon/sec/sec_drv.h (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
1  /* SPDX-License-Identifier: GPL-2.0 */
2  /* Copyright (c) 2016-2017 HiSilicon Limited. */
3  
4  #ifndef _SEC_DRV_H_
5  #define _SEC_DRV_H_
6  
7  #include <crypto/algapi.h>
8  #include <linux/kfifo.h>
9  
/* Maximum scatter gather entries in one hardware SGL (struct sec_hw_sgl) */
#define SEC_MAX_SGE_NUM			64
/* Each queue is made up of three hardware rings */
#define SEC_HW_RING_NUM			3

/* Indices of the individual rings within a queue */
#define SEC_CMD_RING			0
#define SEC_OUTORDER_RING		1
#define SEC_DBG_RING			2

/* A reasonable length to balance memory use against flexibility */
#define SEC_QUEUE_LEN			512
21  
/*
 * struct sec_bd_info - SEC hardware block descriptor (BD).
 *
 * The layout mirrors the hardware descriptor format: w0-w3 are
 * bit-packed control words whose field positions are given by the
 * SEC_BD_W*_M (mask) / SEC_BD_W*_S (shift) macros defined alongside
 * each word.  The remaining words are 64-bit DMA addresses split into
 * lo/hi 32-bit halves.
 */
struct sec_bd_info {
#define SEC_BD_W0_T_LEN_M			GENMASK(4, 0)
#define SEC_BD_W0_T_LEN_S			0

/* Cipher block width - value meaning depends on AES vs DES algorithm */
#define SEC_BD_W0_C_WIDTH_M			GENMASK(6, 5)
#define SEC_BD_W0_C_WIDTH_S			5
#define   SEC_C_WIDTH_AES_128BIT		0
#define   SEC_C_WIDTH_AES_8BIT		1
#define   SEC_C_WIDTH_AES_1BIT		2
#define   SEC_C_WIDTH_DES_64BIT		0
#define   SEC_C_WIDTH_DES_8BIT		1
#define   SEC_C_WIDTH_DES_1BIT		2

/* Cipher chaining mode */
#define SEC_BD_W0_C_MODE_M			GENMASK(9, 7)
#define SEC_BD_W0_C_MODE_S			7
#define   SEC_C_MODE_ECB			0
#define   SEC_C_MODE_CBC			1
#define   SEC_C_MODE_CTR			4
#define   SEC_C_MODE_CCM			5
#define   SEC_C_MODE_GCM			6
#define   SEC_C_MODE_XTS			7

#define SEC_BD_W0_SEQ				BIT(10)
#define SEC_BD_W0_DE				BIT(11)
#define SEC_BD_W0_DAT_SKIP_M			GENMASK(13, 12)
#define SEC_BD_W0_DAT_SKIP_S			12
#define SEC_BD_W0_C_GRAN_SIZE_19_16_M		GENMASK(17, 14)
#define SEC_BD_W0_C_GRAN_SIZE_19_16_S		14

/* Cipher operation: bypass, encrypt or decrypt */
#define SEC_BD_W0_CIPHER_M			GENMASK(19, 18)
#define SEC_BD_W0_CIPHER_S			18
#define   SEC_CIPHER_NULL			0
#define   SEC_CIPHER_ENCRYPT			1
#define   SEC_CIPHER_DECRYPT			2

/* Authentication operation: none, MAC generation, or MAC verification */
#define SEC_BD_W0_AUTH_M			GENMASK(21, 20)
#define SEC_BD_W0_AUTH_S			20
#define   SEC_AUTH_NULL				0
#define   SEC_AUTH_MAC				1
#define   SEC_AUTH_VERIF			2

#define SEC_BD_W0_AI_GEN			BIT(22)
#define SEC_BD_W0_CI_GEN			BIT(23)
#define SEC_BD_W0_NO_HPAD			BIT(24)
#define SEC_BD_W0_HM_M				GENMASK(26, 25)
#define SEC_BD_W0_HM_S				25
#define SEC_BD_W0_ICV_OR_SKEY_EN_M		GENMASK(28, 27)
#define SEC_BD_W0_ICV_OR_SKEY_EN_S		27

/* Multi purpose field - gran size bits for send, flag for recv */
#define SEC_BD_W0_FLAG_M			GENMASK(30, 29)
#define SEC_BD_W0_C_GRAN_SIZE_21_20_M		GENMASK(30, 29)
#define SEC_BD_W0_FLAG_S			29
#define SEC_BD_W0_C_GRAN_SIZE_21_20_S		29

/* Completion marker - NOTE(review): presumably set by hardware; confirm */
#define SEC_BD_W0_DONE				BIT(31)
	u32 w0;

#define SEC_BD_W1_AUTH_GRAN_SIZE_M		GENMASK(21, 0)
#define SEC_BD_W1_AUTH_GRAN_SIZE_S		0
#define SEC_BD_W1_M_KEY_EN			BIT(22)
#define SEC_BD_W1_BD_INVALID			BIT(23)
#define SEC_BD_W1_ADDR_TYPE			BIT(24)

/* Authentication algorithm selection */
#define SEC_BD_W1_A_ALG_M			GENMASK(28, 25)
#define SEC_BD_W1_A_ALG_S			25
#define   SEC_A_ALG_SHA1			0
#define   SEC_A_ALG_SHA256			1
#define   SEC_A_ALG_MD5				2
#define   SEC_A_ALG_SHA224			3
#define   SEC_A_ALG_HMAC_SHA1			8
#define   SEC_A_ALG_HMAC_SHA224			10
#define   SEC_A_ALG_HMAC_SHA256			11
#define   SEC_A_ALG_HMAC_MD5			12
#define   SEC_A_ALG_AES_XCBC			13
#define   SEC_A_ALG_AES_CMAC			14

/* Cipher algorithm selection */
#define SEC_BD_W1_C_ALG_M			GENMASK(31, 29)
#define SEC_BD_W1_C_ALG_S			29
#define   SEC_C_ALG_DES				0
#define   SEC_C_ALG_3DES			1
#define   SEC_C_ALG_AES				2

	u32 w1;

#define SEC_BD_W2_C_GRAN_SIZE_15_0_M		GENMASK(15, 0)
#define SEC_BD_W2_C_GRAN_SIZE_15_0_S		0
#define SEC_BD_W2_GRAN_NUM_M			GENMASK(31, 16)
#define SEC_BD_W2_GRAN_NUM_S			16
	u32 w2;

#define SEC_BD_W3_AUTH_LEN_OFFSET_M		GENMASK(9, 0)
#define SEC_BD_W3_AUTH_LEN_OFFSET_S		0
#define SEC_BD_W3_CIPHER_LEN_OFFSET_M		GENMASK(19, 10)
#define SEC_BD_W3_CIPHER_LEN_OFFSET_S		10
#define SEC_BD_W3_MAC_LEN_M			GENMASK(24, 20)
#define SEC_BD_W3_MAC_LEN_S			20
#define SEC_BD_W3_A_KEY_LEN_M			GENMASK(29, 25)
#define SEC_BD_W3_A_KEY_LEN_S			25
/* Cipher key length encoding - values depend on algorithm selected in W1 */
#define SEC_BD_W3_C_KEY_LEN_M			GENMASK(31, 30)
#define SEC_BD_W3_C_KEY_LEN_S			30
#define   SEC_KEY_LEN_AES_128			0
#define   SEC_KEY_LEN_AES_192			1
#define   SEC_KEY_LEN_AES_256			2
#define   SEC_KEY_LEN_DES			1
#define   SEC_KEY_LEN_3DES_3_KEY		1
#define   SEC_KEY_LEN_3DES_2_KEY		3
	u32 w3;

	/* W4,5 - auth key or auth IV address; the two uses share the words */
	union {
		u32 authkey_addr_lo;
		u32 authiv_addr_lo;
	};
	union {
		u32 authkey_addr_hi;
		u32 authiv_addr_hi;
	};

	/* W6,7 */
	u32 cipher_key_addr_lo;
	u32 cipher_key_addr_hi;

	/* W8,9 */
	u32 cipher_iv_addr_lo;
	u32 cipher_iv_addr_hi;

	/* W10,11 */
	u32 data_addr_lo;
	u32 data_addr_hi;

	/* W12,13 */
	u32 mac_addr_lo;
	u32 mac_addr_hi;

	/* W14,15 */
	u32 cipher_destin_addr_lo;
	u32 cipher_destin_addr_hi;
};
161  
/*
 * Iomapped register regions of the device; used to index
 * sec_dev_info.regs[].
 */
enum sec_mem_region {
	SEC_COMMON = 0,
	SEC_SAA,
	SEC_NUM_ADDR_REGIONS	/* count of regions, not a region itself */
};
167  
/* Length of the human readable queue/irq name in struct sec_queue */
#define SEC_NAME_SIZE				64
/* Number of hardware queues provided by one SEC instance */
#define SEC_Q_NUM				16
170  
171  
/**
 * struct sec_queue_ring_cmd - store information about a SEC HW cmd ring
 * @used: Local counter used to cheaply establish if the ring is empty.
 * @lock: Protect against simultaneous adjusting of the read and write pointers.
 * @vaddr: Virtual address for the ram pages used for the ring.
 * @paddr: Physical address of the dma mapped region of ram used for the ring.
 * @callback: Callback function called on a ring element completing.  The
 *   ctx argument is the opaque pointer supplied via sec_queue_send().
 */
struct sec_queue_ring_cmd {
	atomic_t used;
	struct mutex lock;
	struct sec_bd_info *vaddr;
	dma_addr_t paddr;
	void (*callback)(struct sec_bd_info *resp, void *ctx);
};
187  
struct sec_debug_bd_info;
/**
 * struct sec_queue_ring_db - SEC debug ring (element layout opaque here)
 * @vaddr: Virtual address of the ring memory.
 * @paddr: DMA address of the ring memory.
 */
struct sec_queue_ring_db {
	struct sec_debug_bd_info *vaddr;
	dma_addr_t paddr;
};
193  
struct sec_out_bd_info;
/**
 * struct sec_queue_ring_cq - SEC completion ring (element layout opaque here)
 * @vaddr: Virtual address of the ring memory.
 * @paddr: DMA address of the ring memory.
 */
struct sec_queue_ring_cq {
	struct sec_out_bd_info *vaddr;
	dma_addr_t paddr;
};
199  
struct sec_dev_info;

/*
 * Supported cipher algorithm / mode / key-size combinations.
 * Naming: SEC_C_<alg>_<mode>_<key bits>[_<3DES keying option>].
 */
enum sec_cipher_alg {
	SEC_C_DES_ECB_64,
	SEC_C_DES_CBC_64,

	SEC_C_3DES_ECB_192_3KEY,
	SEC_C_3DES_ECB_192_2KEY,

	SEC_C_3DES_CBC_192_3KEY,
	SEC_C_3DES_CBC_192_2KEY,

	SEC_C_AES_ECB_128,
	SEC_C_AES_ECB_192,
	SEC_C_AES_ECB_256,

	SEC_C_AES_CBC_128,
	SEC_C_AES_CBC_192,
	SEC_C_AES_CBC_256,

	SEC_C_AES_CTR_128,
	SEC_C_AES_CTR_192,
	SEC_C_AES_CTR_256,

	SEC_C_AES_XTS_128,
	SEC_C_AES_XTS_256,

	SEC_C_NULL,
};
229  
/**
 * struct sec_alg_tfm_ctx - hardware specific transformation context
 * @cipher_alg: Cipher algorithm enabled include encryption mode.
 * @key: Key storage if required.
 * @pkey: DMA address for the key storage.
 * @req_template: Request template to save time on setup.
 * @queue: The hardware queue associated with this tfm context.
 * @lock: Protect key and pkey to ensure they are consistent
 * @auth_buf: Current context buffer for auth operations.
 * @backlog: The backlog queue used for cases where our buffers aren't
 * large enough.
 */
struct sec_alg_tfm_ctx {
	enum sec_cipher_alg cipher_alg;
	u8 *key;
	dma_addr_t pkey;
	struct sec_bd_info req_template;
	struct sec_queue *queue;
	struct mutex lock;
	u8 *auth_buf;
	struct list_head backlog;
};
252  
/**
 * struct sec_request - data associated with a single crypto request
 * @elements: List of subparts of this request (hardware size restriction)
 * @num_elements: The number of subparts (used as an optimization)
 * @lock: Protect elements of this structure against concurrent change.
 * @tfm_ctx: hardware specific context.
 * @len_in: length of in sgl from upper layers
 * @len_out: length of out sgl from upper layers
 * @dma_iv: initialization vector - physical address
 * @err: store used to track errors across subelements of this request.
 * @req_base: pointer to base element of associated crypto context.
 * This is needed to allow shared handling skcipher, ahash etc.
 * @cb: completion callback.
 * @backlog_head: list head to allow backlog maintenance.
 *
 * The hardware is limited in the maximum size of data that it can
 * process from a single BD.  Typically this is fairly large (32MB)
 * but still requires the complexity of splitting the incoming
 * skreq up into a number of elements complete with appropriate
 * iv chaining.
 */
struct sec_request {
	struct list_head elements;
	int num_elements;
	struct mutex lock;
	struct sec_alg_tfm_ctx *tfm_ctx;
	int len_in;
	int len_out;
	dma_addr_t dma_iv;
	int err;
	struct crypto_async_request *req_base;
	void (*cb)(struct sec_bd_info *resp, struct crypto_async_request *req);
	struct list_head backlog_head;
};
287  
/**
 * struct sec_request_el - A subpart of a request.
 * @head: allow us to attach this to the list in the sec_request
 * @req: hardware block descriptor corresponding to this request subpart
 * @in: hardware sgl for input - virtual address
 * @dma_in: hardware sgl for input - physical address
 * @sgl_in: scatterlist for this request subpart
 * @out: hardware sgl for output - virtual address
 * @dma_out: hardware sgl for output - physical address
 * @sgl_out: scatterlist for this request subpart
 * @sec_req: The request which this subpart forms a part of
 * @el_length: Number of bytes in this subpart.  Needed to locate the
 * last ivsize chunk for iv chaining.
 */
struct sec_request_el {
	struct list_head head;
	struct sec_bd_info req;
	struct sec_hw_sgl *in;
	dma_addr_t dma_in;
	struct scatterlist *sgl_in;
	struct sec_hw_sgl *out;
	dma_addr_t dma_out;
	struct scatterlist *sgl_out;
	struct sec_request *sec_req;
	size_t el_length;
};
314  
/**
 * struct sec_queue - All the information about a HW queue
 * @dev_info: The parent SEC device to which this queue belongs.
 * @task_irq: Completion interrupt for the queue.
 * @name: Human readable queue description also used as irq name.
 * @ring_cmd: The command ring for this queue.
 * @ring_cq: The completion ring for this queue.
 * @ring_db: The debug ring for this queue.
 * @regs: The iomapped device registers
 * @queue_id: Index of the queue used for naming and resource selection.
 * @in_use: Flag to say if the queue is in use.
 * @expected: The next expected element to finish assuming we were in order.
 * @unprocessed: A bitmap to track which OoO elements are done but not handled.
 * @softqueue: A software queue used when chaining requirements prevent direct
 *   use of the hardware queues.
 * @havesoftqueue: A flag to say we have a queue - as we may need one for the
 *   current mode.
 * @queuelock: Protect the soft queue from concurrent changes to avoid some
 *   potential loss of data races.
 * @shadow: Pointers back to the shadow copy of the hardware ring element
 *   needed because we can't store any context reference in the bd element.
 */
struct sec_queue {
	struct sec_dev_info *dev_info;
	int task_irq;
	char name[SEC_NAME_SIZE];
	struct sec_queue_ring_cmd ring_cmd;
	struct sec_queue_ring_cq ring_cq;
	struct sec_queue_ring_db ring_db;
	void __iomem *regs;
	u32 queue_id;
	bool in_use;
	int expected;

	DECLARE_BITMAP(unprocessed, SEC_QUEUE_LEN);
	DECLARE_KFIFO_PTR(softqueue, typeof(struct sec_request_el *));
	bool havesoftqueue;
	spinlock_t queuelock;
	void *shadow[SEC_QUEUE_LEN];
};
353  
/**
 * struct sec_hw_sge - One of the up to 64 SEC HW SGL entries
 * @buf: The IOV dma address for this entry.
 * @len: Length of this IOV.
 * @pad: Reserved space.
 */
struct sec_hw_sge {
	dma_addr_t buf;
	unsigned int len;
	unsigned int pad;
};
365  
/**
 * struct sec_hw_sgl - One hardware SGL entry.
 * @next_sgl: The next entry if we need to chain dma address. Null if last.
 * @entry_sum_in_chain: The full count of SGEs - only matters for first SGL.
 * @entry_sum_in_sgl: The number of SGEs in this SGL element.
 * @flag: Unused in skciphers.
 * @serial_num: Unused in skciphers.
 * @cpuid: Currently unused.
 * @data_bytes_in_sgl: Count of bytes from all SGEs in this SGL.
 * @next: Virtual address used to stash the next sgl - useful in completion.
 * @reserved: A reserved field not currently used.
 * @sge_entries: The (up to) 64 Scatter Gather Entries, representing IOVs.
 * @node: Currently unused.
 */
struct sec_hw_sgl {
	dma_addr_t next_sgl;
	u16 entry_sum_in_chain;
	u16 entry_sum_in_sgl;
	u32 flag;
	u64 serial_num;
	u32 cpuid;
	u32 data_bytes_in_sgl;
	struct sec_hw_sgl *next;
	u64 reserved;
	struct sec_hw_sge  sge_entries[SEC_MAX_SGE_NUM];
	u8 node[16];
};
393  
struct dma_pool;

/**
 * struct sec_dev_info - The full SEC unit comprising queues and processors.
 * @sec_id: Index used to track which SEC this is when more than one is present.
 * @num_saas: The number of SAA back-end processors enabled.
 * @regs: iomapped register regions shared by whole SEC unit.
 * @dev_lock: Protects concurrent queue allocation / freeing for the SEC.
 * @queues_in_use: Count of queues currently allocated (under @dev_lock).
 * @queues: The 16 queues that this SEC instance provides.
 * @dev: Device pointer.
 * @hw_sgl_pool: DMA pool used to minimise mapping for the scatter gather lists.
 */
struct sec_dev_info {
	int sec_id;
	int num_saas;
	void __iomem *regs[SEC_NUM_ADDR_REGIONS];
	struct mutex dev_lock;
	int queues_in_use;
	struct sec_queue queues[SEC_Q_NUM];
	struct device *dev;
	struct dma_pool *hw_sgl_pool;
};
416  
/*
 * Queue management API - hardware facing side of the driver.
 * The ctx passed to sec_queue_send() is an opaque cookie; presumably it is
 * handed back via sec_queue_ring_cmd.callback - confirm in the core driver.
 */
int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx);
bool sec_queue_can_enqueue(struct sec_queue *queue, int num);
int sec_queue_stop_release(struct sec_queue *queue);
struct sec_queue *sec_queue_alloc_start_safe(void);
bool sec_queue_empty(struct sec_queue *queue);

/* Algorithm specific elements from sec_algs.c */
void sec_alg_callback(struct sec_bd_info *resp, void *ctx);
int sec_algs_register(void);
void sec_algs_unregister(void);
427  
428  #endif /* _SEC_DRV_H_ */
429