1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * K3 SA2UL crypto accelerator driver
4   *
5   * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
6   *
7   * Authors:	Keerthy
8   *		Vitaly Andrianov
9   *		Tero Kristo
10   */
11  #include <linux/bitfield.h>
12  #include <linux/clk.h>
13  #include <linux/dma-mapping.h>
14  #include <linux/dmaengine.h>
15  #include <linux/dmapool.h>
16  #include <linux/kernel.h>
17  #include <linux/module.h>
18  #include <linux/of.h>
19  #include <linux/of_platform.h>
20  #include <linux/platform_device.h>
21  #include <linux/pm_runtime.h>
22  
23  #include <crypto/aes.h>
24  #include <crypto/authenc.h>
25  #include <crypto/des.h>
26  #include <crypto/internal/aead.h>
27  #include <crypto/internal/hash.h>
28  #include <crypto/internal/skcipher.h>
29  #include <crypto/scatterwalk.h>
30  #include <crypto/sha1.h>
31  #include <crypto/sha2.h>
32  
33  #include "sa2ul.h"
34  
35  /* Byte offset for key in encryption security context */
36  #define SC_ENC_KEY_OFFSET (1 + 27 + 4)
37  /* Byte offset for Aux-1 in encryption security context */
38  #define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
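/*
 * Offset sketch (matches sa_set_sc_enc() below): byte 0 holds the encryption
 * mode selector and bytes 1-27 the mode control instructions; with a further
 * 4 bytes the key area therefore starts at 1 + 27 + 4 = 32, and Aux-1 follows
 * the 32-byte key area at offset 64.
 */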
39  
40  #define SA_CMDL_UPD_ENC         0x0001
41  #define SA_CMDL_UPD_AUTH        0x0002
42  #define SA_CMDL_UPD_ENC_IV      0x0004
43  #define SA_CMDL_UPD_AUTH_IV     0x0008
44  #define SA_CMDL_UPD_AUX_KEY     0x0010
45  
46  #define SA_AUTH_SUBKEY_LEN	16
47  #define SA_CMDL_PAYLOAD_LENGTH_MASK	0xFFFF
48  #define SA_CMDL_SOP_BYPASS_LEN_MASK	0xFF000000
49  
50  #define MODE_CONTROL_BYTES	27
51  #define SA_HASH_PROCESSING	0
52  #define SA_CRYPTO_PROCESSING	0
53  #define SA_UPLOAD_HASH_TO_TLR	BIT(6)
54  
55  #define SA_SW0_FLAGS_MASK	0xF0000
56  #define SA_SW0_CMDL_INFO_MASK	0x1F00000
57  #define SA_SW0_CMDL_PRESENT	BIT(4)
58  #define SA_SW0_ENG_ID_MASK	0x3E000000
59  #define SA_SW0_DEST_INFO_PRESENT	BIT(30)
60  #define SA_SW2_EGRESS_LENGTH		0xFF000000
61  #define SA_BASIC_HASH		0x10
62  
63  #define SHA256_DIGEST_WORDS    8
64  /* Make 32-bit word from 4 bytes */
65  #define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
66  				   ((b2) << 8) | (b3))
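/* For example, SA_MK_U32(0x01, 0x02, 0x03, 0x04) yields 0x01020304. */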
67  
68  /* size of SCCTL structure in bytes */
69  #define SA_SCCTL_SZ 16
70  
71  /* Max Authentication tag size */
72  #define SA_MAX_AUTH_TAG_SZ 64
73  
74  enum sa_algo_id {
75  	SA_ALG_CBC_AES = 0,
76  	SA_ALG_EBC_AES,
77  	SA_ALG_CBC_DES3,
78  	SA_ALG_ECB_DES3,
79  	SA_ALG_SHA1,
80  	SA_ALG_SHA256,
81  	SA_ALG_SHA512,
82  	SA_ALG_AUTHENC_SHA1_AES,
83  	SA_ALG_AUTHENC_SHA256_AES,
84  };
85  
86  struct sa_match_data {
87  	u8 priv;
88  	u8 priv_id;
89  	u32 supported_algos;
90  };
91  
92  static struct device *sa_k3_dev;
93  
94  /**
95   * struct sa_cmdl_cfg - Command label configuration descriptor
96   * @aalg: authentication algorithm ID
97   * @enc_eng_id: Encryption Engine ID supported by the SA hardware
98   * @auth_eng_id: Authentication Engine ID
99   * @iv_size: Initialization Vector size
100   * @akey: Authentication key
101   * @akey_len: Authentication key length
102   * @enc: True if this is an encode request
103   */
104  struct sa_cmdl_cfg {
105  	int aalg;
106  	u8 enc_eng_id;
107  	u8 auth_eng_id;
108  	u8 iv_size;
109  	const u8 *akey;
110  	u16 akey_len;
111  	bool enc;
112  };
113  
114  /**
115   * struct algo_data - Crypto algorithm specific data
116   * @enc_eng: Encryption engine info structure
117   * @auth_eng: Authentication engine info structure
118   * @auth_ctrl: Authentication control word
119   * @hash_size: Size of digest
120   * @iv_idx: iv index in psdata
121   * @iv_out_size: iv out size
122   * @ealg_id: Encryption Algorithm ID
123   * @aalg_id: Authentication algorithm ID
124   * @mci_enc: Mode Control Instruction for Encryption algorithm
125   * @mci_dec: Mode Control Instruction for Decryption
126   * @inv_key: Whether the encryption algorithm demands key inversion
127   * @ctx: Pointer to the algorithm context
128   * @keyed_mac: Whether the authentication algorithm has key
129   * @prep_iopad: Function pointer to generate intermediate ipad/opad
130   */
131  struct algo_data {
132  	struct sa_eng_info enc_eng;
133  	struct sa_eng_info auth_eng;
134  	u8 auth_ctrl;
135  	u8 hash_size;
136  	u8 iv_idx;
137  	u8 iv_out_size;
138  	u8 ealg_id;
139  	u8 aalg_id;
140  	u8 *mci_enc;
141  	u8 *mci_dec;
142  	bool inv_key;
143  	struct sa_tfm_ctx *ctx;
144  	bool keyed_mac;
145  	void (*prep_iopad)(struct algo_data *algo, const u8 *key,
146  			   u16 key_sz, __be32 *ipad, __be32 *opad);
147  };
148  
149  /**
150   * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
151   * @type: Type of the crypto algorithm.
152   * @alg: Union of crypto algorithm definitions.
153   * @registered: Flag indicating if the crypto algorithm is already registered
154   */
155  struct sa_alg_tmpl {
156  	u32 type;		/* CRYPTO_ALG_TYPE from <linux/crypto.h> */
157  	union {
158  		struct skcipher_alg skcipher;
159  		struct ahash_alg ahash;
160  		struct aead_alg aead;
161  	} alg;
162  	bool registered;
163  };
164  
165  /**
166   * struct sa_mapped_sg: scatterlist information for tx and rx
167   * @mapped: Set to true if the @sgt is mapped
168   * @dir: mapping direction used for @sgt
169   * @split_sg: Set if the sg is split and needs to be freed up
170   * @static_sg: Static scatterlist entry for overriding data
171   * @sgt: scatterlist table for DMA API use
172   */
173  struct sa_mapped_sg {
174  	bool mapped;
175  	enum dma_data_direction dir;
176  	struct scatterlist static_sg;
177  	struct scatterlist *split_sg;
178  	struct sg_table sgt;
179  };
180  /**
181   * struct sa_rx_data: RX packet miscellaneous data placeholder
182   * @req: crypto request data pointer
183   * @ddev: pointer to the DMA device
184   * @tx_in: dma_async_tx_descriptor pointer for rx channel
185   * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
186   * @enc: Flag indicating either encryption or decryption
187   * @enc_iv_size: Initialisation vector size
188   * @iv_idx: Initialisation vector index
189   */
190  struct sa_rx_data {
191  	void *req;
192  	struct device *ddev;
193  	struct dma_async_tx_descriptor *tx_in;
194  	struct sa_mapped_sg mapped_sg[2];
195  	u8 enc;
196  	u8 enc_iv_size;
197  	u8 iv_idx;
198  };
199  
200  /**
201   * struct sa_req: SA request definition
202   * @dev: device for the request
203   * @size: total data to be transmitted via DMA
204   * @enc_offset: offset of cipher data
205   * @enc_size: data to be passed to cipher engine
206   * @enc_iv: cipher IV
207   * @auth_offset: offset of the authentication data
208   * @auth_size: size of the authentication data
209   * @auth_iv: authentication IV
210   * @type: algorithm type for the request
211   * @cmdl: command label pointer
212   * @base: pointer to the base request
213   * @ctx: pointer to the algorithm context data
214   * @enc: true if this is an encode request
215   * @src: source data
216   * @dst: destination data
217   * @callback: DMA callback for the request
218   * @mdata_size: metadata size passed to DMA
219   */
220  struct sa_req {
221  	struct device *dev;
222  	u16 size;
223  	u8 enc_offset;
224  	u16 enc_size;
225  	u8 *enc_iv;
226  	u8 auth_offset;
227  	u16 auth_size;
228  	u8 *auth_iv;
229  	u32 type;
230  	u32 *cmdl;
231  	struct crypto_async_request *base;
232  	struct sa_tfm_ctx *ctx;
233  	bool enc;
234  	struct scatterlist *src;
235  	struct scatterlist *dst;
236  	dma_async_tx_callback callback;
237  	u16 mdata_size;
238  };
239  
240  /*
241   * Mode Control Instructions for various Key lengths 128, 192, 256
242   * For CBC (Cipher Block Chaining) mode for encryption
243   */
244  static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
245  	{	0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
246  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
247  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
248  	{	0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
249  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
250  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
251  	{	0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
252  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
253  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
254  };
255  
256  /*
257   * Mode Control Instructions for various Key lengths 128, 192, 256
258   * For CBC (Cipher Block Chaining) mode for decryption
259   */
260  static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
261  	{	0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
262  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
263  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
264  	{	0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
265  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
266  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
267  	{	0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
268  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
269  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
270  };
271  
272  /*
273   * Mode Control Instructions for various Key lengths 128, 192, 256
274   * For CBC (Cipher Block Chaining) mode for encryption
275   */
276  static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
277  	{	0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
278  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
279  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
280  	{	0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
281  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
282  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
283  	{	0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
284  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
285  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
286  };
287  
288  /*
289   * Mode Control Instructions for various Key lengths 128, 192, 256
290   * For CBC (Cipher Block Chaining) mode for decryption
291   */
292  static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
293  	{	0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
294  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
295  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
296  	{	0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
297  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
298  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
299  	{	0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
300  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
301  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
302  };
303  
304  /*
305   * Mode Control Instructions for various Key lengths 128, 192, 256
306   * For ECB (Electronic Code Book) mode for encryption
307   */
308  static u8 mci_ecb_enc_array[3][27] = {
309  	{	0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
310  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
311  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
312  	{	0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
313  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
314  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
315  	{	0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
316  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
317  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
318  };
319  
320  /*
321   * Mode Control Instructions for various Key lengths 128, 192, 256
322   * For ECB (Electronic Code Book) mode for decryption
323   */
324  static u8 mci_ecb_dec_array[3][27] = {
325  	{	0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
326  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
327  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
328  	{	0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
329  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
330  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
331  	{	0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
332  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
333  		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
334  };
335  
336  /*
337   * Mode Control Instructions for DES algorithm
338   * For CBC (Cipher Block Chaining) mode and ECB mode
339   * encryption and for decryption respectively
340   */
341  static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
342  	0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
343  	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
344  	0x00, 0x00, 0x00,
345  };
346  
347  static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
348  	0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
349  	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
350  	0x00, 0x00, 0x00,
351  };
352  
353  static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
354  	0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
355  	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
356  	0x00, 0x00, 0x00,
357  };
358  
359  static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
360  	0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
361  	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
362  	0x00, 0x00, 0x00,
363  };
364  
365  /*
366   * Perform 16 byte or 128 bit swizzling
367   * The SA2UL expects the security context to
368   * be in little endian format and the bus width is 128 bits or 16 bytes.
369   * Hence swap 16 bytes at a time from higher to lower address.
370   */
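/*
 * Example: an aligned 16-byte chunk 00 01 ... 0e 0f is rewritten in place
 * as 0f 0e ... 01 00.
 */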
371  static void sa_swiz_128(u8 *in, u16 len)
372  {
373  	u8 data[16];
374  	int i, j;
375  
376  	for (i = 0; i < len; i += 16) {
377  		memcpy(data, &in[i], 16);
378  		for (j = 0; j < 16; j++)
379  			in[i + j] = data[15 - j];
380  	}
381  }
382  
383  /* Prepare the ipad and opad from the key as per HMAC (RFC 2104), step 1 */
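/*
 * Per RFC 2104 the key is zero-padded to one block before being XORed with
 * 0x36 (ipad) or 0x5c (opad); the "Instead of XOR with 0" loops below
 * implement the padded tail.
 */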
384  static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
385  {
386  	int i;
387  
388  	for (i = 0; i < key_sz; i++)
389  		k_ipad[i] = key[i] ^ 0x36;
390  
391  	/* Instead of XOR with 0 */
392  	for (; i < SHA1_BLOCK_SIZE; i++)
393  		k_ipad[i] = 0x36;
394  }
395  
396  static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
397  {
398  	int i;
399  
400  	for (i = 0; i < key_sz; i++)
401  		k_opad[i] = key[i] ^ 0x5c;
402  
403  	/* Instead of XOR with 0 */
404  	for (; i < SHA1_BLOCK_SIZE; i++)
405  		k_opad[i] = 0x5c;
406  }
407  
408  static void sa_export_shash(void *state, struct shash_desc *hash,
409  			    int digest_size, __be32 *out)
410  {
411  	struct sha1_state *sha1;
412  	struct sha256_state *sha256;
413  	u32 *result;
414  
415  	switch (digest_size) {
416  	case SHA1_DIGEST_SIZE:
417  		sha1 = state;
418  		result = sha1->state;
419  		break;
420  	case SHA256_DIGEST_SIZE:
421  		sha256 = state;
422  		result = sha256->state;
423  		break;
424  	default:
425  		dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
426  			digest_size);
427  		return;
428  	}
429  
430  	crypto_shash_export(hash, state);
431  
432  	cpu_to_be32_array(out, result, digest_size / 4);
433  }
434  
435  static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
436  			      u16 key_sz, __be32 *ipad, __be32 *opad)
437  {
438  	SHASH_DESC_ON_STACK(shash, data->ctx->shash);
439  	int block_size = crypto_shash_blocksize(data->ctx->shash);
440  	int digest_size = crypto_shash_digestsize(data->ctx->shash);
441  	union {
442  		struct sha1_state sha1;
443  		struct sha256_state sha256;
444  		u8 k_pad[SHA1_BLOCK_SIZE];
445  	} sha;
446  
447  	shash->tfm = data->ctx->shash;
448  
449  	prepare_kipad(sha.k_pad, key, key_sz);
450  
451  	crypto_shash_init(shash);
452  	crypto_shash_update(shash, sha.k_pad, block_size);
453  	sa_export_shash(&sha, shash, digest_size, ipad);
454  
455  	prepare_kopad(sha.k_pad, key, key_sz);
456  
457  	crypto_shash_init(shash);
458  	crypto_shash_update(shash, sha.k_pad, block_size);
459  
460  	sa_export_shash(&sha, shash, digest_size, opad);
461  
462  	memzero_explicit(&sha, sizeof(sha));
463  }
464  
465  /* Derive the inverse key used in AES-CBC decryption operation */
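/*
 * The "inverse key" is taken from the tail of the expanded key schedule
 * (see the key_pos selection below); presumably the engine runs the
 * equivalent inverse cipher starting from the last round keys.
 */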
466  static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
467  {
468  	struct crypto_aes_ctx ctx;
469  	int key_pos;
470  
471  	if (aes_expandkey(&ctx, key, key_sz)) {
472  		dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
473  		return -EINVAL;
474  	}
475  
476  	/* Workaround to get the right inverse for AES_KEYSIZE_192 size keys */
477  	if (key_sz == AES_KEYSIZE_192) {
478  		ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
479  		ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
480  	}
481  
482  	/* Based on the crypto_aes_expand_key logic */
483  	switch (key_sz) {
484  	case AES_KEYSIZE_128:
485  	case AES_KEYSIZE_192:
486  		key_pos = key_sz + 24;
487  		break;
488  
489  	case AES_KEYSIZE_256:
490  		key_pos = key_sz + 24 - 4;
491  		break;
492  
493  	default:
494  		dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
495  		return -EINVAL;
496  	}
497  
498  	memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
499  	return 0;
500  }
501  
502  /* Set Security context for the encryption engine */
503  static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
504  			 u8 enc, u8 *sc_buf)
505  {
506  	const u8 *mci = NULL;
507  
508  	/* Set Encryption mode selector to crypto processing */
509  	sc_buf[0] = SA_CRYPTO_PROCESSING;
510  
511  	if (enc)
512  		mci = ad->mci_enc;
513  	else
514  		mci = ad->mci_dec;
515  	/* Set the mode control instructions in security context */
516  	if (mci)
517  		memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
518  
519  	/* For AES-CBC decryption get the inverse key */
520  	if (ad->inv_key && !enc) {
521  		if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
522  			return -EINVAL;
523  	/* For all other cases: the key is used as-is */
524  	} else {
525  		memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
526  	}
527  
528  	return 0;
529  }
530  
531  /* Set Security context for the authentication engine */
532  static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
533  			   u8 *sc_buf)
534  {
535  	__be32 *ipad = (void *)(sc_buf + 32);
536  	__be32 *opad = (void *)(sc_buf + 64);
537  
538  	/* Set Authentication mode selector to hash processing */
539  	sc_buf[0] = SA_HASH_PROCESSING;
540  	/* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
541  	sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
542  	sc_buf[1] |= ad->auth_ctrl;
543  
544  	/* Copy the keys or ipad/opad */
545  	if (ad->keyed_mac)
546  		ad->prep_iopad(ad, key, key_sz, ipad, opad);
547  	else {
548  		/* basic hash */
549  		sc_buf[1] |= SA_BASIC_HASH;
550  	}
551  }
552  
553  static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
554  {
555  	int j;
556  
557  	for (j = 0; j < ((size16) ? 4 : 2); j++) {
558  		*out = cpu_to_be32(*((u32 *)iv));
559  		iv += 4;
560  		out++;
561  	}
562  }
563  
564  /* Format general command label */
565  static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
566  			      struct sa_cmdl_upd_info *upd_info)
567  {
568  	u8 enc_offset = 0, auth_offset = 0, total = 0;
569  	u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
570  	u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
571  	u32 *word_ptr = (u32 *)cmdl;
572  	int i;
573  
574  	/* Clear the command label */
575  	memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
576  
577  	/* Initialize the command update structure */
578  	memzero_explicit(upd_info, sizeof(*upd_info));
579  
580  	if (cfg->enc_eng_id && cfg->auth_eng_id) {
581  		if (cfg->enc) {
582  			auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
583  			enc_next_eng = cfg->auth_eng_id;
584  
585  			if (cfg->iv_size)
586  				auth_offset += cfg->iv_size;
587  		} else {
588  			enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
589  			auth_next_eng = cfg->enc_eng_id;
590  		}
591  	}
592  
593  	if (cfg->enc_eng_id) {
594  		upd_info->flags |= SA_CMDL_UPD_ENC;
595  		upd_info->enc_size.index = enc_offset >> 2;
596  		upd_info->enc_offset.index = upd_info->enc_size.index + 1;
597  		/* Encryption command label */
598  		cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
599  
600  		/* Encryption modes requiring IV */
601  		if (cfg->iv_size) {
602  			upd_info->flags |= SA_CMDL_UPD_ENC_IV;
603  			upd_info->enc_iv.index =
604  				(enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
605  			upd_info->enc_iv.size = cfg->iv_size;
606  
607  			cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
608  				SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
609  
610  			cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
611  				(SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
612  			total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
613  		} else {
614  			cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
615  						SA_CMDL_HEADER_SIZE_BYTES;
616  			total += SA_CMDL_HEADER_SIZE_BYTES;
617  		}
618  	}
619  
620  	if (cfg->auth_eng_id) {
621  		upd_info->flags |= SA_CMDL_UPD_AUTH;
622  		upd_info->auth_size.index = auth_offset >> 2;
623  		upd_info->auth_offset.index = upd_info->auth_size.index + 1;
624  		cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
625  		cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
626  			SA_CMDL_HEADER_SIZE_BYTES;
627  		total += SA_CMDL_HEADER_SIZE_BYTES;
628  	}
629  
630  	total = roundup(total, 8);
631  
632  	for (i = 0; i < total / 4; i++)
633  		word_ptr[i] = swab32(word_ptr[i]);
634  
635  	return total;
636  }
637  
638  /* Update Command label */
639  static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
640  				  struct sa_cmdl_upd_info *upd_info)
641  {
642  	int i = 0, j;
643  
644  	if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
645  		cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
646  		cmdl[upd_info->enc_size.index] |= req->enc_size;
647  		cmdl[upd_info->enc_offset.index] &=
648  						~SA_CMDL_SOP_BYPASS_LEN_MASK;
649  		cmdl[upd_info->enc_offset.index] |=
650  			FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
651  				   req->enc_offset);
652  
653  		if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
654  			__be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
655  			u32 *enc_iv = (u32 *)req->enc_iv;
656  
657  			for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
658  				data[j] = cpu_to_be32(*enc_iv);
659  				enc_iv++;
660  			}
661  		}
662  	}
663  
664  	if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
665  		cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
666  		cmdl[upd_info->auth_size.index] |= req->auth_size;
667  		cmdl[upd_info->auth_offset.index] &=
668  			~SA_CMDL_SOP_BYPASS_LEN_MASK;
669  		cmdl[upd_info->auth_offset.index] |=
670  			FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
671  				   req->auth_offset);
672  		if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
673  			sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
674  				   req->auth_iv,
675  				   (upd_info->auth_iv.size > 8));
676  		}
677  		if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
678  			int offset = (req->auth_size & 0xF) ? 4 : 0;
679  
680  			memcpy(&cmdl[upd_info->aux_key_info.index],
681  			       &upd_info->aux_key[offset], 16);
682  		}
683  	}
684  }
685  
686  /* Format SWINFO words to be sent to SA */
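/*
 * Word 0 carries the security context ID, flags, command label info and
 * engine ID; words 1 and 2 carry the low and high halves of the context's
 * physical address, with the hash size placed in the egress length field
 * of word 2.
 */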
687  static
688  void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
689  		   u8 cmdl_present, u8 cmdl_offset, u8 flags,
690  		   u8 hash_size, u32 *swinfo)
691  {
692  	swinfo[0] = sc_id;
693  	swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
694  	if (likely(cmdl_present))
695  		swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
696  					cmdl_offset | SA_SW0_CMDL_PRESENT);
697  	swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);
698  
699  	swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
700  	swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
701  	swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
702  	swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
703  }
704  
705  /* Dump the security context */
706  static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
707  {
708  #ifdef DEBUG
709  	dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
710  	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
711  		       16, 1, buf, SA_CTX_MAX_SZ, false);
712  #endif
713  }
714  
715  static
716  int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
717  	       const u8 *enc_key, u16 enc_key_sz,
718  	       const u8 *auth_key, u16 auth_key_sz,
719  	       struct algo_data *ad, u8 enc, u32 *swinfo)
720  {
721  	int enc_sc_offset = 0;
722  	int auth_sc_offset = 0;
723  	u8 *sc_buf = ctx->sc;
724  	u16 sc_id = ctx->sc_id;
725  	u8 first_engine = 0;
726  
727  	memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
728  
729  	if (ad->auth_eng.eng_id) {
730  		if (enc)
731  			first_engine = ad->enc_eng.eng_id;
732  		else
733  			first_engine = ad->auth_eng.eng_id;
734  
735  		enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
736  		auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
737  		sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
738  		if (!ad->hash_size)
739  			return -EINVAL;
740  		ad->hash_size = roundup(ad->hash_size, 8);
741  
742  	} else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
743  		enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
744  		first_engine = ad->enc_eng.eng_id;
745  		sc_buf[1] = SA_SCCTL_FE_ENC;
746  		ad->hash_size = ad->iv_out_size;
747  	}
748  
749  	/* SCCTL Owner info: 0=host, 1=CP_ACE */
750  	sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
751  	memcpy(&sc_buf[2], &sc_id, 2);
752  	sc_buf[4] = 0x0;
753  	sc_buf[5] = match_data->priv_id;
754  	sc_buf[6] = match_data->priv;
755  	sc_buf[7] = 0x0;
756  
757  	/* Prepare context for encryption engine */
758  	if (ad->enc_eng.sc_size) {
759  		if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
760  				  &sc_buf[enc_sc_offset]))
761  			return -EINVAL;
762  	}
763  
764  	/* Prepare context for authentication engine */
765  	if (ad->auth_eng.sc_size)
766  		sa_set_sc_auth(ad, auth_key, auth_key_sz,
767  			       &sc_buf[auth_sc_offset]);
768  
769  	/* Set the ownership of context to CP_ACE */
770  	sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
771  
772  	/* swizzle the security context */
773  	sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
774  
775  	sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
776  		      SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
777  
778  	sa_dump_sc(sc_buf, ctx->sc_phys);
779  
780  	return 0;
781  }
782  
783  /* Free the per direction context memory */
784  static void sa_free_ctx_info(struct sa_ctx_info *ctx,
785  			     struct sa_crypto_data *data)
786  {
787  	unsigned long bn;
788  
789  	bn = ctx->sc_id - data->sc_id_start;
790  	spin_lock(&data->scid_lock);
791  	__clear_bit(bn, data->ctx_bm);
792  	data->sc_id--;
793  	spin_unlock(&data->scid_lock);
794  
795  	if (ctx->sc) {
796  		dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
797  		ctx->sc = NULL;
798  	}
799  }
800  
801  static int sa_init_ctx_info(struct sa_ctx_info *ctx,
802  			    struct sa_crypto_data *data)
803  {
804  	unsigned long bn;
805  	int err;
806  
807  	spin_lock(&data->scid_lock);
808  	bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
809  	__set_bit(bn, data->ctx_bm);
810  	data->sc_id++;
811  	spin_unlock(&data->scid_lock);
812  
813  	ctx->sc_id = (u16)(data->sc_id_start + bn);
814  
815  	ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
816  	if (!ctx->sc) {
817  		dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
818  		err = -ENOMEM;
819  		goto scid_rollback;
820  	}
821  
822  	return 0;
823  
824  scid_rollback:
825  	spin_lock(&data->scid_lock);
826  	__clear_bit(bn, data->ctx_bm);
827  	data->sc_id--;
828  	spin_unlock(&data->scid_lock);
829  
830  	return err;
831  }
832  
833  static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
834  {
835  	struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
836  	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
837  
838  	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
839  		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
840  		ctx->dec.sc_id, &ctx->dec.sc_phys);
841  
842  	sa_free_ctx_info(&ctx->enc, data);
843  	sa_free_ctx_info(&ctx->dec, data);
844  
845  	crypto_free_skcipher(ctx->fallback.skcipher);
846  }
847  
848  static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
849  {
850  	struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
851  	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
852  	const char *name = crypto_tfm_alg_name(&tfm->base);
853  	struct crypto_skcipher *child;
854  	int ret;
855  
856  	memzero_explicit(ctx, sizeof(*ctx));
857  	ctx->dev_data = data;
858  
859  	ret = sa_init_ctx_info(&ctx->enc, data);
860  	if (ret)
861  		return ret;
862  	ret = sa_init_ctx_info(&ctx->dec, data);
863  	if (ret) {
864  		sa_free_ctx_info(&ctx->enc, data);
865  		return ret;
866  	}
867  
868  	child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
869  
870  	if (IS_ERR(child)) {
871  		dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
872  		return PTR_ERR(child);
873  	}
874  
875  	ctx->fallback.skcipher = child;
876  	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
877  					 sizeof(struct skcipher_request));
878  
879  	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
880  		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
881  		ctx->dec.sc_id, &ctx->dec.sc_phys);
882  	return 0;
883  }
884  
885  static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
886  			    unsigned int keylen, struct algo_data *ad)
887  {
888  	struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
889  	struct crypto_skcipher *child = ctx->fallback.skcipher;
890  	int cmdl_len;
891  	struct sa_cmdl_cfg cfg;
892  	int ret;
893  
894  	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
895  	    keylen != AES_KEYSIZE_256)
896  		return -EINVAL;
897  
898  	ad->enc_eng.eng_id = SA_ENG_ID_EM1;
899  	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
900  
901  	memzero_explicit(&cfg, sizeof(cfg));
902  	cfg.enc_eng_id = ad->enc_eng.eng_id;
903  	cfg.iv_size = crypto_skcipher_ivsize(tfm);
904  
905  	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
906  	crypto_skcipher_set_flags(child, tfm->base.crt_flags &
907  					 CRYPTO_TFM_REQ_MASK);
908  	ret = crypto_skcipher_setkey(child, key, keylen);
909  	if (ret)
910  		return ret;
911  
912  	/* Setup Encryption Security Context & Command label template */
913  	if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
914  		       ad, 1, &ctx->enc.epib[1]))
915  		goto badkey;
916  
917  	cmdl_len = sa_format_cmdl_gen(&cfg,
918  				      (u8 *)ctx->enc.cmdl,
919  				      &ctx->enc.cmdl_upd_info);
920  	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
921  		goto badkey;
922  
923  	ctx->enc.cmdl_size = cmdl_len;
924  
925  	/* Setup Decryption Security Context & Command label template */
926  	if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
927  		       ad, 0, &ctx->dec.epib[1]))
928  		goto badkey;
929  
930  	cfg.enc_eng_id = ad->enc_eng.eng_id;
931  	cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
932  				      &ctx->dec.cmdl_upd_info);
933  
934  	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
935  		goto badkey;
936  
937  	ctx->dec.cmdl_size = cmdl_len;
938  	ctx->iv_idx = ad->iv_idx;
939  
940  	return 0;
941  
942  badkey:
943  	dev_err(sa_k3_dev, "%s: badkey\n", __func__);
944  	return -EINVAL;
945  }
946  
947  static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
948  			     unsigned int keylen)
949  {
950  	struct algo_data ad = { 0 };
951  	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
952  	int key_idx = (keylen >> 3) - 2;
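	/* e.g. AES-128: (16 >> 3) - 2 = 0, AES-192: 1, AES-256: 2 */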
953  
954  	if (key_idx >= 3)
955  		return -EINVAL;
956  
957  	ad.mci_enc = mci_cbc_enc_array[key_idx];
958  	ad.mci_dec = mci_cbc_dec_array[key_idx];
959  	ad.inv_key = true;
960  	ad.ealg_id = SA_EALG_ID_AES_CBC;
961  	ad.iv_idx = 4;
962  	ad.iv_out_size = 16;
963  
964  	return sa_cipher_setkey(tfm, key, keylen, &ad);
965  }
966  
967  static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
968  			     unsigned int keylen)
969  {
970  	struct algo_data ad = { 0 };
971  	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
972  	int key_idx = (keylen >> 3) - 2;
973  
974  	if (key_idx >= 3)
975  		return -EINVAL;
976  
977  	ad.mci_enc = mci_ecb_enc_array[key_idx];
978  	ad.mci_dec = mci_ecb_dec_array[key_idx];
979  	ad.inv_key = true;
980  	ad.ealg_id = SA_EALG_ID_AES_ECB;
981  
982  	return sa_cipher_setkey(tfm, key, keylen, &ad);
983  }
984  
985  static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
986  			      unsigned int keylen)
987  {
988  	struct algo_data ad = { 0 };
989  
990  	ad.mci_enc = mci_cbc_3des_enc_array;
991  	ad.mci_dec = mci_cbc_3des_dec_array;
992  	ad.ealg_id = SA_EALG_ID_3DES_CBC;
993  	ad.iv_idx = 6;
994  	ad.iv_out_size = 8;
995  
996  	return sa_cipher_setkey(tfm, key, keylen, &ad);
997  }
998  
999  static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
1000  			      unsigned int keylen)
1001  {
1002  	struct algo_data ad = { 0 };
1003  
1004  	ad.mci_enc = mci_ecb_3des_enc_array;
1005  	ad.mci_dec = mci_ecb_3des_dec_array;
1006  
1007  	return sa_cipher_setkey(tfm, key, keylen, &ad);
1008  }
1009  
1010  static void sa_sync_from_device(struct sa_rx_data *rxd)
1011  {
1012  	struct sg_table *sgt;
1013  
1014  	if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
1015  		sgt = &rxd->mapped_sg[0].sgt;
1016  	else
1017  		sgt = &rxd->mapped_sg[1].sgt;
1018  
1019  	dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
1020  }
1021  
1022  static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
1023  {
1024  	int i;
1025  
1026  	for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
1027  		struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
1028  
1029  		if (mapped_sg->mapped) {
1030  			dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
1031  					  mapped_sg->dir, 0);
1032  			kfree(mapped_sg->split_sg);
1033  		}
1034  	}
1035  
1036  	kfree(rxd);
1037  }
1038  
1039  static void sa_aes_dma_in_callback(void *data)
1040  {
1041  	struct sa_rx_data *rxd = data;
1042  	struct skcipher_request *req;
1043  	u32 *result;
1044  	__be32 *mdptr;
1045  	size_t ml, pl;
1046  	int i;
1047  
1048  	sa_sync_from_device(rxd);
1049  	req = container_of(rxd->req, struct skcipher_request, base);
1050  
1051  	if (req->iv) {
1052  		mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
1053  							       &ml);
1054  		result = (u32 *)req->iv;
1055  
1056  		for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1057  			result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1058  	}
1059  
1060  	sa_free_sa_rx_data(rxd);
1061  
1062  	skcipher_request_complete(req, 0);
1063  }
1064  
1065  static void
1066  sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1067  {
1068  	u32 *out, *in;
1069  	int i;
1070  
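	/*
	 * Metadata layout: the EPIB words are copied to the start of the
	 * descriptor metadata, word 4 is set to 0xFFFF0000 below, and the
	 * PS data (command label) follows from word 5 onwards.
	 */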
1071  	for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1072  		*out++ = *in++;
1073  
1074  	mdptr[4] = (0xFFFF << 16);
1075  	for (out = &mdptr[5], in = psdata, i = 0;
1076  	     i < pslen / sizeof(u32); i++)
1077  		*out++ = *in++;
1078  }
1079  
1080  static int sa_run(struct sa_req *req)
1081  {
1082  	struct sa_rx_data *rxd;
1083  	gfp_t gfp_flags;
1084  	u32 cmdl[SA_MAX_CMDL_WORDS];
1085  	struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
1086  	struct device *ddev;
1087  	struct dma_chan *dma_rx;
1088  	int sg_nents, src_nents, dst_nents;
1089  	struct scatterlist *src, *dst;
1090  	size_t pl, ml, split_size;
1091  	struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1092  	int ret;
1093  	struct dma_async_tx_descriptor *tx_out;
1094  	u32 *mdptr;
1095  	bool diff_dst;
1096  	enum dma_data_direction dir_src;
1097  	struct sa_mapped_sg *mapped_sg;
1098  
1099  	gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1100  		GFP_KERNEL : GFP_ATOMIC;
1101  
1102  	rxd = kzalloc(sizeof(*rxd), gfp_flags);
1103  	if (!rxd)
1104  		return -ENOMEM;
1105  
1106  	if (req->src != req->dst) {
1107  		diff_dst = true;
1108  		dir_src = DMA_TO_DEVICE;
1109  	} else {
1110  		diff_dst = false;
1111  		dir_src = DMA_BIDIRECTIONAL;
1112  	}
1113  
1114  	/*
1115  	 * SA2UL has an interesting feature where the receive DMA channel
1116  	 * is selected based on the data passed to the engine. Within the
1117  	 * transition range, there is also a space where it is impossible
1118  	 * to determine where the data will end up, and this should be
1119  	 * avoided. This will be handled by the SW fallback mechanism by
1120  	 * the individual algorithm implementations.
1121  	 */
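	/*
	 * Concretely: requests of 256 bytes or more are routed to dma_rx2,
	 * smaller ones to dma_rx1, and sizes inside the unsafe window are
	 * diverted to the software fallback (see the SA_UNSAFE_DATA_SZ_MIN/MAX
	 * checks in sa_cipher_run() and sa_sha_run()).
	 */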
1122  	if (req->size >= 256)
1123  		dma_rx = pdata->dma_rx2;
1124  	else
1125  		dma_rx = pdata->dma_rx1;
1126  
1127  	ddev = dmaengine_get_dma_device(pdata->dma_tx);
1128  	rxd->ddev = ddev;
1129  
1130  	memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1131  
1132  	sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
1133  
1134  	if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1135  		if (req->enc)
1136  			req->type |=
1137  				(SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1138  		else
1139  			req->type |=
1140  				(SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1141  	}
1142  
1143  	cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1144  
1145  	/*
1146  	 * Map the packets, first we check if the data fits into a single
1147  	 * sg entry and use that if possible. If it does not fit, we check
1148  	 * if we need to do sg_split to align the scatterlist data on the
1149  	 * actual data size being processed by the crypto engine.
1150  	 */
1151  	src = req->src;
1152  	sg_nents = sg_nents_for_len(src, req->size);
1153  
1154  	split_size = req->size;
1155  
1156  	mapped_sg = &rxd->mapped_sg[0];
1157  	if (sg_nents == 1 && split_size <= req->src->length) {
1158  		src = &mapped_sg->static_sg;
1159  		src_nents = 1;
1160  		sg_init_table(src, 1);
1161  		sg_set_page(src, sg_page(req->src), split_size,
1162  			    req->src->offset);
1163  
1164  		mapped_sg->sgt.sgl = src;
1165  		mapped_sg->sgt.orig_nents = src_nents;
1166  		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1167  		if (ret) {
1168  			kfree(rxd);
1169  			return ret;
1170  		}
1171  
1172  		mapped_sg->dir = dir_src;
1173  		mapped_sg->mapped = true;
1174  	} else {
1175  		mapped_sg->sgt.sgl = req->src;
1176  		mapped_sg->sgt.orig_nents = sg_nents;
1177  		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1178  		if (ret) {
1179  			kfree(rxd);
1180  			return ret;
1181  		}
1182  
1183  		mapped_sg->dir = dir_src;
1184  		mapped_sg->mapped = true;
1185  
1186  		ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
1187  			       &split_size, &src, &src_nents, gfp_flags);
1188  		if (ret) {
1189  			src_nents = mapped_sg->sgt.nents;
1190  			src = mapped_sg->sgt.sgl;
1191  		} else {
1192  			mapped_sg->split_sg = src;
1193  		}
1194  	}
1195  
1196  	dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
1197  
1198  	if (!diff_dst) {
1199  		dst_nents = src_nents;
1200  		dst = src;
1201  	} else {
1202  		dst_nents = sg_nents_for_len(req->dst, req->size);
1203  		mapped_sg = &rxd->mapped_sg[1];
1204  
1205  		if (dst_nents == 1 && split_size <= req->dst->length) {
1206  			dst = &mapped_sg->static_sg;
1207  			dst_nents = 1;
1208  			sg_init_table(dst, 1);
1209  			sg_set_page(dst, sg_page(req->dst), split_size,
1210  				    req->dst->offset);
1211  
1212  			mapped_sg->sgt.sgl = dst;
1213  			mapped_sg->sgt.orig_nents = dst_nents;
1214  			ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1215  					      DMA_FROM_DEVICE, 0);
1216  			if (ret)
1217  				goto err_cleanup;
1218  
1219  			mapped_sg->dir = DMA_FROM_DEVICE;
1220  			mapped_sg->mapped = true;
1221  		} else {
1222  			mapped_sg->sgt.sgl = req->dst;
1223  			mapped_sg->sgt.orig_nents = dst_nents;
1224  			ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1225  					      DMA_FROM_DEVICE, 0);
1226  			if (ret)
1227  				goto err_cleanup;
1228  
1229  			mapped_sg->dir = DMA_FROM_DEVICE;
1230  			mapped_sg->mapped = true;
1231  
1232  			ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
1233  				       0, 1, &split_size, &dst, &dst_nents,
1234  				       gfp_flags);
1235  			if (ret) {
1236  				dst_nents = mapped_sg->sgt.nents;
1237  				dst = mapped_sg->sgt.sgl;
1238  			} else {
1239  				mapped_sg->split_sg = dst;
1240  			}
1241  		}
1242  	}
1243  
1244  	rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
1245  					     DMA_DEV_TO_MEM,
1246  					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1247  	if (!rxd->tx_in) {
1248  		dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1249  		ret = -EINVAL;
1250  		goto err_cleanup;
1251  	}
1252  
1253  	rxd->req = (void *)req->base;
1254  	rxd->enc = req->enc;
1255  	rxd->iv_idx = req->ctx->iv_idx;
1256  	rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1257  	rxd->tx_in->callback = req->callback;
1258  	rxd->tx_in->callback_param = rxd;
1259  
1260  	tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
1261  					 src_nents, DMA_MEM_TO_DEV,
1262  					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1263  
1264  	if (!tx_out) {
1265  		dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1266  		ret = -EINVAL;
1267  		goto err_cleanup;
1268  	}
1269  
1270  	/*
1271  	 * Prepare metadata for DMA engine. This essentially describes the
1272  	 * crypto algorithm to be used, data sizes, different keys etc.
1273  	 */
1274  	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
1275  
1276  	sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1277  				   sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
1278  			   sa_ctx->epib);
1279  
1280  	ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1281  	dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
1282  
1283  	dmaengine_submit(tx_out);
1284  	dmaengine_submit(rxd->tx_in);
1285  
1286  	dma_async_issue_pending(dma_rx);
1287  	dma_async_issue_pending(pdata->dma_tx);
1288  
1289  	return -EINPROGRESS;
1290  
1291  err_cleanup:
1292  	sa_free_sa_rx_data(rxd);
1293  
1294  	return ret;
1295  }
1296  
1297  static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1298  {
1299  	struct sa_tfm_ctx *ctx =
1300  	    crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1301  	struct crypto_alg *alg = req->base.tfm->__crt_alg;
1302  	struct sa_req sa_req = { 0 };
1303  
1304  	if (!req->cryptlen)
1305  		return 0;
1306  
1307  	if (req->cryptlen % alg->cra_blocksize)
1308  		return -EINVAL;
1309  
1310  	/* Use SW fallback if the data size is not supported */
1311  	if (req->cryptlen > SA_MAX_DATA_SZ ||
1312  	    (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1313  	     req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1314  		struct skcipher_request *subreq = skcipher_request_ctx(req);
1315  
1316  		skcipher_request_set_tfm(subreq, ctx->fallback.skcipher);
1317  		skcipher_request_set_callback(subreq, req->base.flags,
1318  					      req->base.complete,
1319  					      req->base.data);
1320  		skcipher_request_set_crypt(subreq, req->src, req->dst,
1321  					   req->cryptlen, req->iv);
1322  		if (enc)
1323  			return crypto_skcipher_encrypt(subreq);
1324  		else
1325  			return crypto_skcipher_decrypt(subreq);
1326  	}
1327  
1328  	sa_req.size = req->cryptlen;
1329  	sa_req.enc_size = req->cryptlen;
1330  	sa_req.src = req->src;
1331  	sa_req.dst = req->dst;
1332  	sa_req.enc_iv = iv;
1333  	sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1334  	sa_req.enc = enc;
1335  	sa_req.callback = sa_aes_dma_in_callback;
1336  	sa_req.mdata_size = 44;
1337  	sa_req.base = &req->base;
1338  	sa_req.ctx = ctx;
1339  
1340  	return sa_run(&sa_req);
1341  }
1342  
1343  static int sa_encrypt(struct skcipher_request *req)
1344  {
1345  	return sa_cipher_run(req, req->iv, 1);
1346  }
1347  
1348  static int sa_decrypt(struct skcipher_request *req)
1349  {
1350  	return sa_cipher_run(req, req->iv, 0);
1351  }
1352  
1353  static void sa_sha_dma_in_callback(void *data)
1354  {
1355  	struct sa_rx_data *rxd = data;
1356  	struct ahash_request *req;
1357  	struct crypto_ahash *tfm;
1358  	unsigned int authsize;
1359  	int i;
1360  	size_t ml, pl;
1361  	u32 *result;
1362  	__be32 *mdptr;
1363  
1364  	sa_sync_from_device(rxd);
1365  	req = container_of(rxd->req, struct ahash_request, base);
1366  	tfm = crypto_ahash_reqtfm(req);
1367  	authsize = crypto_ahash_digestsize(tfm);
1368  
1369  	mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1370  	result = (u32 *)req->result;
1371  
1372  	for (i = 0; i < (authsize / 4); i++)
1373  		result[i] = be32_to_cpu(mdptr[i + 4]);
1374  
1375  	sa_free_sa_rx_data(rxd);
1376  
1377  	ahash_request_complete(req, 0);
1378  }
1379  
1380  static int zero_message_process(struct ahash_request *req)
1381  {
1382  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1383  	int sa_digest_size = crypto_ahash_digestsize(tfm);
1384  
1385  	switch (sa_digest_size) {
1386  	case SHA1_DIGEST_SIZE:
1387  		memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1388  		break;
1389  	case SHA256_DIGEST_SIZE:
1390  		memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1391  		break;
1392  	case SHA512_DIGEST_SIZE:
1393  		memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1394  		break;
1395  	default:
1396  		return -EINVAL;
1397  	}
1398  
1399  	return 0;
1400  }
1401  
1402  static int sa_sha_run(struct ahash_request *req)
1403  {
1404  	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1405  	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1406  	struct sa_req sa_req = { 0 };
1407  	size_t auth_len;
1408  
1409  	auth_len = req->nbytes;
1410  
1411  	if (!auth_len)
1412  		return zero_message_process(req);
1413  
1414  	if (auth_len > SA_MAX_DATA_SZ ||
1415  	    (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1416  	     auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1417  		struct ahash_request *subreq = &rctx->fallback_req;
1418  		int ret = 0;
1419  
1420  		ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1421  		subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1422  
1423  		crypto_ahash_init(subreq);
1424  
1425  		subreq->nbytes = auth_len;
1426  		subreq->src = req->src;
1427  		subreq->result = req->result;
1428  
1429  		ret |= crypto_ahash_update(subreq);
1430  
1431  		subreq->nbytes = 0;
1432  
1433  		ret |= crypto_ahash_final(subreq);
1434  
1435  		return ret;
1436  	}
1437  
1438  	sa_req.size = auth_len;
1439  	sa_req.auth_size = auth_len;
1440  	sa_req.src = req->src;
1441  	sa_req.dst = req->src;
1442  	sa_req.enc = true;
1443  	sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1444  	sa_req.callback = sa_sha_dma_in_callback;
1445  	sa_req.mdata_size = 28;
1446  	sa_req.ctx = ctx;
1447  	sa_req.base = &req->base;
1448  
1449  	return sa_run(&sa_req);
1450  }
1451  
1452  static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
1453  {
1454  	int bs = crypto_shash_blocksize(ctx->shash);
1455  	int cmdl_len;
1456  	struct sa_cmdl_cfg cfg;
1457  
1458  	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1459  	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1460  	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1461  
1462  	memset(ctx->authkey, 0, bs);
1463  	memset(&cfg, 0, sizeof(cfg));
1464  	cfg.aalg = ad->aalg_id;
1465  	cfg.enc_eng_id = ad->enc_eng.eng_id;
1466  	cfg.auth_eng_id = ad->auth_eng.eng_id;
1467  	cfg.iv_size = 0;
1468  	cfg.akey = NULL;
1469  	cfg.akey_len = 0;
1470  
1471  	ctx->dev_data = dev_get_drvdata(sa_k3_dev);
1472  	/* Setup Encryption Security Context & Command label template */
1473  	if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
1474  		       ad, 0, &ctx->enc.epib[1]))
1475  		goto badkey;
1476  
1477  	cmdl_len = sa_format_cmdl_gen(&cfg,
1478  				      (u8 *)ctx->enc.cmdl,
1479  				      &ctx->enc.cmdl_upd_info);
1480  	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1481  		goto badkey;
1482  
1483  	ctx->enc.cmdl_size = cmdl_len;
1484  
1485  	return 0;
1486  
1487  badkey:
1488  	dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1489  	return -EINVAL;
1490  }
1491  
1492  static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1493  {
1494  	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1495  	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1496  	int ret;
1497  
1498  	memset(ctx, 0, sizeof(*ctx));
1499  	ctx->dev_data = data;
1500  	ret = sa_init_ctx_info(&ctx->enc, data);
1501  	if (ret)
1502  		return ret;
1503  
1504  	if (alg_base) {
1505  		ctx->shash = crypto_alloc_shash(alg_base, 0,
1506  						CRYPTO_ALG_NEED_FALLBACK);
1507  		if (IS_ERR(ctx->shash)) {
1508  			dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1509  				alg_base);
1510  			return PTR_ERR(ctx->shash);
1511  		}
1512  		/* for fallback */
1513  		ctx->fallback.ahash =
1514  			crypto_alloc_ahash(alg_base, 0,
1515  					   CRYPTO_ALG_NEED_FALLBACK);
1516  		if (IS_ERR(ctx->fallback.ahash)) {
1517  			dev_err(ctx->dev_data->dev,
1518  				"Could not load fallback driver\n");
1519  			return PTR_ERR(ctx->fallback.ahash);
1520  		}
1521  	}
1522  
1523  	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1524  		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1525  		ctx->dec.sc_id, &ctx->dec.sc_phys);
1526  
1527  	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1528  				 sizeof(struct sa_sha_req_ctx) +
1529  				 crypto_ahash_reqsize(ctx->fallback.ahash));
1530  
1531  	return 0;
1532  }
1533  
1534  static int sa_sha_digest(struct ahash_request *req)
1535  {
1536  	return sa_sha_run(req);
1537  }
1538  
1539  static int sa_sha_init(struct ahash_request *req)
1540  {
1541  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1542  	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1543  	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1544  
1545  	dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1546  		crypto_ahash_digestsize(tfm), rctx);
1547  
1548  	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1549  	rctx->fallback_req.base.flags =
1550  		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1551  
1552  	return crypto_ahash_init(&rctx->fallback_req);
1553  }
1554  
1555  static int sa_sha_update(struct ahash_request *req)
1556  {
1557  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1558  	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1559  	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1560  
1561  	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1562  	rctx->fallback_req.base.flags =
1563  		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1564  	rctx->fallback_req.nbytes = req->nbytes;
1565  	rctx->fallback_req.src = req->src;
1566  
1567  	return crypto_ahash_update(&rctx->fallback_req);
1568  }
1569  
1570  static int sa_sha_final(struct ahash_request *req)
1571  {
1572  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1573  	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1574  	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1575  
1576  	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1577  	rctx->fallback_req.base.flags =
1578  		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1579  	rctx->fallback_req.result = req->result;
1580  
1581  	return crypto_ahash_final(&rctx->fallback_req);
1582  }
1583  
1584  static int sa_sha_finup(struct ahash_request *req)
1585  {
1586  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1587  	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1588  	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1589  
1590  	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1591  	rctx->fallback_req.base.flags =
1592  		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1593  
1594  	rctx->fallback_req.nbytes = req->nbytes;
1595  	rctx->fallback_req.src = req->src;
1596  	rctx->fallback_req.result = req->result;
1597  
1598  	return crypto_ahash_finup(&rctx->fallback_req);
1599  }
1600  
1601  static int sa_sha_import(struct ahash_request *req, const void *in)
1602  {
1603  	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1604  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1605  	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1606  
1607  	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1608  	rctx->fallback_req.base.flags = req->base.flags &
1609  		CRYPTO_TFM_REQ_MAY_SLEEP;
1610  
1611  	return crypto_ahash_import(&rctx->fallback_req, in);
1612  }
1613  
1614  static int sa_sha_export(struct ahash_request *req, void *out)
1615  {
1616  	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1617  	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1618  	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1619  	struct ahash_request *subreq = &rctx->fallback_req;
1620  
1621  	ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1622  	subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1623  
1624  	return crypto_ahash_export(subreq, out);
1625  }
1626  
1627  static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1628  {
1629  	struct algo_data ad = { 0 };
1630  	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1631  
1632  	sa_sha_cra_init_alg(tfm, "sha1");
1633  
1634  	ad.aalg_id = SA_AALG_ID_SHA1;
1635  	ad.hash_size = SHA1_DIGEST_SIZE;
1636  	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1637  
1638  	sa_sha_setup(ctx, &ad);
1639  
1640  	return 0;
1641  }
1642  
1643  static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1644  {
1645  	struct algo_data ad = { 0 };
1646  	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1647  
1648  	sa_sha_cra_init_alg(tfm, "sha256");
1649  
1650  	ad.aalg_id = SA_AALG_ID_SHA2_256;
1651  	ad.hash_size = SHA256_DIGEST_SIZE;
1652  	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1653  
1654  	sa_sha_setup(ctx, &ad);
1655  
1656  	return 0;
1657  }
1658  
1659  static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1660  {
1661  	struct algo_data ad = { 0 };
1662  	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1663  
1664  	sa_sha_cra_init_alg(tfm, "sha512");
1665  
1666  	ad.aalg_id = SA_AALG_ID_SHA2_512;
1667  	ad.hash_size = SHA512_DIGEST_SIZE;
1668  	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1669  
1670  	sa_sha_setup(ctx, &ad);
1671  
1672  	return 0;
1673  }
1674  
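/* Hash transform exit: release the security context and the shash/ahash fallbacks */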
1675  static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1676  {
1677  	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1678  	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1679  
1680  	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1681  		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1682  		ctx->dec.sc_id, &ctx->dec.sc_phys);
1683  
1684  	if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1685  		sa_free_ctx_info(&ctx->enc, data);
1686  
1687  	crypto_free_shash(ctx->shash);
1688  	crypto_free_ahash(ctx->fallback.ahash);
1689  }
1690  
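/*
 * DMA-in completion callback for AEAD requests. The computed tag is taken
 * from the descriptor metadata (byte-swapped per 32-bit word); on encrypt it
 * is appended to the destination, on decrypt it is compared against the tag
 * carried in the source and -EBADMSG is returned on mismatch.
 */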
1691  static void sa_aead_dma_in_callback(void *data)
1692  {
1693  	struct sa_rx_data *rxd = data;
1694  	struct aead_request *req;
1695  	struct crypto_aead *tfm;
1696  	unsigned int start;
1697  	unsigned int authsize;
1698  	u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1699  	size_t pl, ml;
1700  	int i;
1701  	int err = 0;
1702  	u32 *mdptr;
1703  
1704  	sa_sync_from_device(rxd);
1705  	req = container_of(rxd->req, struct aead_request, base);
1706  	tfm = crypto_aead_reqtfm(req);
1707  	start = req->assoclen + req->cryptlen;
1708  	authsize = crypto_aead_authsize(tfm);
1709  
1710  	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1711  	for (i = 0; i < (authsize / 4); i++)
1712  		mdptr[i + 4] = swab32(mdptr[i + 4]);
1713  
1714  	if (rxd->enc) {
1715  		scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1716  					 1);
1717  	} else {
1718  		start -= authsize;
1719  		scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1720  					 0);
1721  
1722  		err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1723  	}
1724  
1725  	sa_free_sa_rx_data(rxd);
1726  
1727  	aead_request_complete(req, err);
1728  }
1729  
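/*
 * Common AEAD transform init: allocate the base hash (shash) and the software
 * fallback AEAD, then reserve one security context each for the encrypt and
 * decrypt directions.
 */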
1730  static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1731  			    const char *fallback)
1732  {
1733  	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1734  	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1735  	int ret;
1736  
1737  	memzero_explicit(ctx, sizeof(*ctx));
1738  	ctx->dev_data = data;
1739  
1740  	ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1741  	if (IS_ERR(ctx->shash)) {
1742  		dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1743  		return PTR_ERR(ctx->shash);
1744  	}
1745  
1746  	ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1747  					       CRYPTO_ALG_NEED_FALLBACK);
1748  
1749  	if (IS_ERR(ctx->fallback.aead)) {
1750  		dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1751  			fallback);
1752  		return PTR_ERR(ctx->fallback.aead);
1753  	}
1754  
1755  	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1756  				crypto_aead_reqsize(ctx->fallback.aead));
1757  
1758  	ret = sa_init_ctx_info(&ctx->enc, data);
1759  	if (ret)
1760  		return ret;
1761  
1762  	ret = sa_init_ctx_info(&ctx->dec, data);
1763  	if (ret) {
1764  		sa_free_ctx_info(&ctx->enc, data);
1765  		return ret;
1766  	}
1767  
1768  	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1769  		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1770  		ctx->dec.sc_id, &ctx->dec.sc_phys);
1771  
1772  	return ret;
1773  }
1774  
1775  static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1776  {
1777  	return sa_cra_init_aead(tfm, "sha1",
1778  				"authenc(hmac(sha1-ce),cbc(aes-ce))");
1779  }
1780  
1781  static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1782  {
1783  	return sa_cra_init_aead(tfm, "sha256",
1784  				"authenc(hmac(sha256-ce),cbc(aes-ce))");
1785  }
1786  
1787  static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1788  {
1789  	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1790  	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1791  
1792  	crypto_free_shash(ctx->shash);
1793  	crypto_free_aead(ctx->fallback.aead);
1794  
1795  	sa_free_ctx_info(&ctx->enc, data);
1796  	sa_free_ctx_info(&ctx->dec, data);
1797  }
1798  
1799  /* AEAD algorithm configuration interface function */
1800  static int sa_aead_setkey(struct crypto_aead *authenc,
1801  			  const u8 *key, unsigned int keylen,
1802  			  struct algo_data *ad)
1803  {
1804  	struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1805  	struct crypto_authenc_keys keys;
1806  	int cmdl_len;
1807  	struct sa_cmdl_cfg cfg;
1808  	int key_idx;
1809  
1810  	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1811  		return -EINVAL;
1812  
1813  	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
1814  	key_idx = (keys.enckeylen >> 3) - 2;
1815  	if (key_idx >= 3)
1816  		return -EINVAL;
1817  
1818  	ad->ctx = ctx;
1819  	ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1820  	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1821  	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1822  	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1823  	ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1824  	ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1825  	ad->inv_key = true;
1826  	ad->keyed_mac = true;
1827  	ad->ealg_id = SA_EALG_ID_AES_CBC;
1828  	ad->prep_iopad = sa_prepare_iopads;
1829  
1830  	memset(&cfg, 0, sizeof(cfg));
1831  	cfg.enc = true;
1832  	cfg.aalg = ad->aalg_id;
1833  	cfg.enc_eng_id = ad->enc_eng.eng_id;
1834  	cfg.auth_eng_id = ad->auth_eng.eng_id;
1835  	cfg.iv_size = crypto_aead_ivsize(authenc);
1836  	cfg.akey = keys.authkey;
1837  	cfg.akey_len = keys.authkeylen;
1838  
1839  	/* Setup Encryption Security Context & Command label template */
1840  	if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
1841  		       keys.enckeylen, keys.authkey, keys.authkeylen,
1842  		       ad, 1, &ctx->enc.epib[1]))
1843  		return -EINVAL;
1844  
1845  	cmdl_len = sa_format_cmdl_gen(&cfg,
1846  				      (u8 *)ctx->enc.cmdl,
1847  				      &ctx->enc.cmdl_upd_info);
1848  	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1849  		return -EINVAL;
1850  
1851  	ctx->enc.cmdl_size = cmdl_len;
1852  
1853  	/* Setup Decryption Security Context & Command label template */
1854  	if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
1855  		       keys.enckeylen, keys.authkey, keys.authkeylen,
1856  		       ad, 0, &ctx->dec.epib[1]))
1857  		return -EINVAL;
1858  
1859  	cfg.enc = false;
1860  	cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1861  				      &ctx->dec.cmdl_upd_info);
1862  
1863  	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1864  		return -EINVAL;
1865  
1866  	ctx->dec.cmdl_size = cmdl_len;
1867  
1868  	crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1869  	crypto_aead_set_flags(ctx->fallback.aead,
1870  			      crypto_aead_get_flags(authenc) &
1871  			      CRYPTO_TFM_REQ_MASK);
1872  
1873  	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1874  }
1875  
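/* Propagate the requested authentication tag size to the fallback AEAD */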
1876  static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1877  {
1878  	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1879  
1880  	return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1881  }
1882  
1883  static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1884  				   const u8 *key, unsigned int keylen)
1885  {
1886  	struct algo_data ad = { 0 };
1887  
1888  	ad.ealg_id = SA_EALG_ID_AES_CBC;
1889  	ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1890  	ad.hash_size = SHA1_DIGEST_SIZE;
1891  	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1892  
1893  	return sa_aead_setkey(authenc, key, keylen, &ad);
1894  }
1895  
1896  static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1897  				     const u8 *key, unsigned int keylen)
1898  {
1899  	struct algo_data ad = { 0 };
1900  
1901  	ad.ealg_id = SA_EALG_ID_AES_CBC;
1902  	ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1903  	ad.hash_size = SHA256_DIGEST_SIZE;
1904  	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1905  
1906  	return sa_aead_setkey(authenc, key, keylen, &ad);
1907  }
1908  
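/*
 * Common AEAD request path. Requests larger than SA_MAX_DATA_SZ or inside the
 * unsafe size window are handed to the software fallback AEAD; everything
 * else is packed into a struct sa_req and submitted to the hardware via
 * sa_run().
 */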
1909  static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1910  {
1911  	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1912  	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1913  	struct sa_req sa_req = { 0 };
1914  	size_t auth_size, enc_size;
1915  
1916  	enc_size = req->cryptlen;
1917  	auth_size = req->assoclen + req->cryptlen;
1918  
1919  	if (!enc) {
1920  		enc_size -= crypto_aead_authsize(tfm);
1921  		auth_size -= crypto_aead_authsize(tfm);
1922  	}
1923  
1924  	if (auth_size > SA_MAX_DATA_SZ ||
1925  	    (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1926  	     auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1927  		struct aead_request *subreq = aead_request_ctx(req);
1928  		int ret;
1929  
1930  		aead_request_set_tfm(subreq, ctx->fallback.aead);
1931  		aead_request_set_callback(subreq, req->base.flags,
1932  					  req->base.complete, req->base.data);
1933  		aead_request_set_crypt(subreq, req->src, req->dst,
1934  				       req->cryptlen, req->iv);
1935  		aead_request_set_ad(subreq, req->assoclen);
1936  
1937  		ret = enc ? crypto_aead_encrypt(subreq) :
1938  			crypto_aead_decrypt(subreq);
1939  		return ret;
1940  	}
1941  
1942  	sa_req.enc_offset = req->assoclen;
1943  	sa_req.enc_size = enc_size;
1944  	sa_req.auth_size = auth_size;
1945  	sa_req.size = auth_size;
1946  	sa_req.enc_iv = iv;
1947  	sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1948  	sa_req.enc = enc;
1949  	sa_req.callback = sa_aead_dma_in_callback;
1950  	sa_req.mdata_size = 52;
1951  	sa_req.base = &req->base;
1952  	sa_req.ctx = ctx;
1953  	sa_req.src = req->src;
1954  	sa_req.dst = req->dst;
1955  
1956  	return sa_run(&sa_req);
1957  }
1958  
1959  /* AEAD algorithm encrypt interface function */
1960  static int sa_aead_encrypt(struct aead_request *req)
1961  {
1962  	return sa_aead_run(req, req->iv, 1);
1963  }
1964  
1965  /* AEAD algorithm decrypt interface function */
1966  static int sa_aead_decrypt(struct aead_request *req)
1967  {
1968  	return sa_aead_run(req, req->iv, 0);
1969  }
1970  
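/*
 * Algorithm templates exposed by the driver; entries are enabled per SoC
 * through the supported_algos bitmap in struct sa_match_data.
 */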
1971  static struct sa_alg_tmpl sa_algs[] = {
1972  	[SA_ALG_CBC_AES] = {
1973  		.type = CRYPTO_ALG_TYPE_SKCIPHER,
1974  		.alg.skcipher = {
1975  			.base.cra_name		= "cbc(aes)",
1976  			.base.cra_driver_name	= "cbc-aes-sa2ul",
1977  			.base.cra_priority	= 30000,
1978  			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
1979  						  CRYPTO_ALG_KERN_DRIVER_ONLY |
1980  						  CRYPTO_ALG_ASYNC |
1981  						  CRYPTO_ALG_NEED_FALLBACK,
1982  			.base.cra_blocksize	= AES_BLOCK_SIZE,
1983  			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
1984  			.base.cra_module	= THIS_MODULE,
1985  			.init			= sa_cipher_cra_init,
1986  			.exit			= sa_cipher_cra_exit,
1987  			.min_keysize		= AES_MIN_KEY_SIZE,
1988  			.max_keysize		= AES_MAX_KEY_SIZE,
1989  			.ivsize			= AES_BLOCK_SIZE,
1990  			.setkey			= sa_aes_cbc_setkey,
1991  			.encrypt		= sa_encrypt,
1992  			.decrypt		= sa_decrypt,
1993  		}
1994  	},
1995  	[SA_ALG_EBC_AES] = {
1996  		.type = CRYPTO_ALG_TYPE_SKCIPHER,
1997  		.alg.skcipher = {
1998  			.base.cra_name		= "ecb(aes)",
1999  			.base.cra_driver_name	= "ecb-aes-sa2ul",
2000  			.base.cra_priority	= 30000,
2001  			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
2002  						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2003  						  CRYPTO_ALG_ASYNC |
2004  						  CRYPTO_ALG_NEED_FALLBACK,
2005  			.base.cra_blocksize	= AES_BLOCK_SIZE,
2006  			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2007  			.base.cra_module	= THIS_MODULE,
2008  			.init			= sa_cipher_cra_init,
2009  			.exit			= sa_cipher_cra_exit,
2010  			.min_keysize		= AES_MIN_KEY_SIZE,
2011  			.max_keysize		= AES_MAX_KEY_SIZE,
2012  			.setkey			= sa_aes_ecb_setkey,
2013  			.encrypt		= sa_encrypt,
2014  			.decrypt		= sa_decrypt,
2015  		}
2016  	},
2017  	[SA_ALG_CBC_DES3] = {
2018  		.type = CRYPTO_ALG_TYPE_SKCIPHER,
2019  		.alg.skcipher = {
2020  			.base.cra_name		= "cbc(des3_ede)",
2021  			.base.cra_driver_name	= "cbc-des3-sa2ul",
2022  			.base.cra_priority	= 30000,
2023  			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
2024  						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2025  						  CRYPTO_ALG_ASYNC |
2026  						  CRYPTO_ALG_NEED_FALLBACK,
2027  			.base.cra_blocksize	= DES_BLOCK_SIZE,
2028  			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2029  			.base.cra_module	= THIS_MODULE,
2030  			.init			= sa_cipher_cra_init,
2031  			.exit			= sa_cipher_cra_exit,
2032  			.min_keysize		= 3 * DES_KEY_SIZE,
2033  			.max_keysize		= 3 * DES_KEY_SIZE,
2034  			.ivsize			= DES_BLOCK_SIZE,
2035  			.setkey			= sa_3des_cbc_setkey,
2036  			.encrypt		= sa_encrypt,
2037  			.decrypt		= sa_decrypt,
2038  		}
2039  	},
2040  	[SA_ALG_ECB_DES3] = {
2041  		.type = CRYPTO_ALG_TYPE_SKCIPHER,
2042  		.alg.skcipher = {
2043  			.base.cra_name		= "ecb(des3_ede)",
2044  			.base.cra_driver_name	= "ecb-des3-sa2ul",
2045  			.base.cra_priority	= 30000,
2046  			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
2047  						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2048  						  CRYPTO_ALG_ASYNC |
2049  						  CRYPTO_ALG_NEED_FALLBACK,
2050  			.base.cra_blocksize	= DES_BLOCK_SIZE,
2051  			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2052  			.base.cra_module	= THIS_MODULE,
2053  			.init			= sa_cipher_cra_init,
2054  			.exit			= sa_cipher_cra_exit,
2055  			.min_keysize		= 3 * DES_KEY_SIZE,
2056  			.max_keysize		= 3 * DES_KEY_SIZE,
2057  			.setkey			= sa_3des_ecb_setkey,
2058  			.encrypt		= sa_encrypt,
2059  			.decrypt		= sa_decrypt,
2060  		}
2061  	},
2062  	[SA_ALG_SHA1] = {
2063  		.type = CRYPTO_ALG_TYPE_AHASH,
2064  		.alg.ahash = {
2065  			.halg.base = {
2066  				.cra_name	= "sha1",
2067  				.cra_driver_name	= "sha1-sa2ul",
2068  				.cra_priority	= 400,
2069  				.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
2070  						  CRYPTO_ALG_ASYNC |
2071  						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2072  						  CRYPTO_ALG_NEED_FALLBACK,
2073  				.cra_blocksize	= SHA1_BLOCK_SIZE,
2074  				.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2075  				.cra_module	= THIS_MODULE,
2076  				.cra_init	= sa_sha1_cra_init,
2077  				.cra_exit	= sa_sha_cra_exit,
2078  			},
2079  			.halg.digestsize	= SHA1_DIGEST_SIZE,
2080  			.halg.statesize		= sizeof(struct sa_sha_req_ctx) +
2081  						  sizeof(struct sha1_state),
2082  			.init			= sa_sha_init,
2083  			.update			= sa_sha_update,
2084  			.final			= sa_sha_final,
2085  			.finup			= sa_sha_finup,
2086  			.digest			= sa_sha_digest,
2087  			.export			= sa_sha_export,
2088  			.import			= sa_sha_import,
2089  		},
2090  	},
2091  	[SA_ALG_SHA256] = {
2092  		.type = CRYPTO_ALG_TYPE_AHASH,
2093  		.alg.ahash = {
2094  			.halg.base = {
2095  				.cra_name	= "sha256",
2096  				.cra_driver_name	= "sha256-sa2ul",
2097  				.cra_priority	= 400,
2098  				.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
2099  						  CRYPTO_ALG_ASYNC |
2100  						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2101  						  CRYPTO_ALG_NEED_FALLBACK,
2102  				.cra_blocksize	= SHA256_BLOCK_SIZE,
2103  				.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2104  				.cra_module	= THIS_MODULE,
2105  				.cra_init	= sa_sha256_cra_init,
2106  				.cra_exit	= sa_sha_cra_exit,
2107  			},
2108  			.halg.digestsize	= SHA256_DIGEST_SIZE,
2109  			.halg.statesize		= sizeof(struct sa_sha_req_ctx) +
2110  						  sizeof(struct sha256_state),
2111  			.init			= sa_sha_init,
2112  			.update			= sa_sha_update,
2113  			.final			= sa_sha_final,
2114  			.finup			= sa_sha_finup,
2115  			.digest			= sa_sha_digest,
2116  			.export			= sa_sha_export,
2117  			.import			= sa_sha_import,
2118  		},
2119  	},
2120  	[SA_ALG_SHA512] = {
2121  		.type = CRYPTO_ALG_TYPE_AHASH,
2122  		.alg.ahash = {
2123  			.halg.base = {
2124  				.cra_name	= "sha512",
2125  				.cra_driver_name	= "sha512-sa2ul",
2126  				.cra_priority	= 400,
2127  				.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
2128  						  CRYPTO_ALG_ASYNC |
2129  						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2130  						  CRYPTO_ALG_NEED_FALLBACK,
2131  				.cra_blocksize	= SHA512_BLOCK_SIZE,
2132  				.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2133  				.cra_module	= THIS_MODULE,
2134  				.cra_init	= sa_sha512_cra_init,
2135  				.cra_exit	= sa_sha_cra_exit,
2136  			},
2137  			.halg.digestsize	= SHA512_DIGEST_SIZE,
2138  			.halg.statesize		= sizeof(struct sa_sha_req_ctx) +
2139  						  sizeof(struct sha512_state),
2140  			.init			= sa_sha_init,
2141  			.update			= sa_sha_update,
2142  			.final			= sa_sha_final,
2143  			.finup			= sa_sha_finup,
2144  			.digest			= sa_sha_digest,
2145  			.export			= sa_sha_export,
2146  			.import			= sa_sha_import,
2147  		},
2148  	},
2149  	[SA_ALG_AUTHENC_SHA1_AES] = {
2150  		.type	= CRYPTO_ALG_TYPE_AEAD,
2151  		.alg.aead = {
2152  			.base = {
2153  				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2154  				.cra_driver_name =
2155  					"authenc(hmac(sha1),cbc(aes))-sa2ul",
2156  				.cra_blocksize = AES_BLOCK_SIZE,
2157  				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
2158  					CRYPTO_ALG_KERN_DRIVER_ONLY |
2159  					CRYPTO_ALG_ASYNC |
2160  					CRYPTO_ALG_NEED_FALLBACK,
2161  				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2162  				.cra_module = THIS_MODULE,
2163  				.cra_priority = 3000,
2164  			},
2165  			.ivsize = AES_BLOCK_SIZE,
2166  			.maxauthsize = SHA1_DIGEST_SIZE,
2167  
2168  			.init = sa_cra_init_aead_sha1,
2169  			.exit = sa_exit_tfm_aead,
2170  			.setkey = sa_aead_cbc_sha1_setkey,
2171  			.setauthsize = sa_aead_setauthsize,
2172  			.encrypt = sa_aead_encrypt,
2173  			.decrypt = sa_aead_decrypt,
2174  		},
2175  	},
2176  	[SA_ALG_AUTHENC_SHA256_AES] = {
2177  		.type	= CRYPTO_ALG_TYPE_AEAD,
2178  		.alg.aead = {
2179  			.base = {
2180  				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2181  				.cra_driver_name =
2182  					"authenc(hmac(sha256),cbc(aes))-sa2ul",
2183  				.cra_blocksize = AES_BLOCK_SIZE,
2184  				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
2185  					CRYPTO_ALG_KERN_DRIVER_ONLY |
2186  					CRYPTO_ALG_ASYNC |
2187  					CRYPTO_ALG_NEED_FALLBACK,
2188  				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2189  				.cra_module = THIS_MODULE,
2190  				.cra_alignmask = 0,
2191  				.cra_priority = 3000,
2192  			},
2193  			.ivsize = AES_BLOCK_SIZE,
2194  			.maxauthsize = SHA256_DIGEST_SIZE,
2195  
2196  			.init = sa_cra_init_aead_sha256,
2197  			.exit = sa_exit_tfm_aead,
2198  			.setkey = sa_aead_cbc_sha256_setkey,
2199  			.setauthsize = sa_aead_setauthsize,
2200  			.encrypt = sa_aead_encrypt,
2201  			.decrypt = sa_aead_decrypt,
2202  		},
2203  	},
2204  };
2205  
2206  /* Register the algorithms in crypto framework */
2207  static void sa_register_algos(struct sa_crypto_data *dev_data)
2208  {
2209  	const struct sa_match_data *match_data = dev_data->match_data;
2210  	struct device *dev = dev_data->dev;
2211  	char *alg_name;
2212  	u32 type;
2213  	int i, err;
2214  
2215  	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2216  		/* Skip unsupported algos */
2217  		if (!(match_data->supported_algos & BIT(i)))
2218  			continue;
2219  
2220  		type = sa_algs[i].type;
2221  		if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2222  			alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2223  			err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2224  		} else if (type == CRYPTO_ALG_TYPE_AHASH) {
2225  			alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2226  			err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2227  		} else if (type == CRYPTO_ALG_TYPE_AEAD) {
2228  			alg_name = sa_algs[i].alg.aead.base.cra_name;
2229  			err = crypto_register_aead(&sa_algs[i].alg.aead);
2230  		} else {
2231  			dev_err(dev,
2232  				"un-supported crypto algorithm (%d)",
2233  				sa_algs[i].type);
2234  			continue;
2235  		}
2236  
2237  		if (err)
2238  			dev_err(dev, "Failed to register '%s'\n", alg_name);
2239  		else
2240  			sa_algs[i].registered = true;
2241  	}
2242  }
2243  
2244  /* Unregister the algorithms in crypto framework */
2245  static void sa_unregister_algos(const struct device *dev)
2246  {
2247  	u32 type;
2248  	int i;
2249  
2250  	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2251  		type = sa_algs[i].type;
2252  		if (!sa_algs[i].registered)
2253  			continue;
2254  		if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2255  			crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2256  		else if (type == CRYPTO_ALG_TYPE_AHASH)
2257  			crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2258  		else if (type == CRYPTO_ALG_TYPE_AEAD)
2259  			crypto_unregister_aead(&sa_algs[i].alg.aead);
2260  
2261  		sa_algs[i].registered = false;
2262  	}
2263  }
2264  
2265  static int sa_init_mem(struct sa_crypto_data *dev_data)
2266  {
2267  	struct device *dev = &dev_data->pdev->dev;
2268  	/* Setup dma pool for security context buffers */
2269  	dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2270  					    SA_CTX_MAX_SZ, 64, 0);
2271  	if (!dev_data->sc_pool) {
2272  		dev_err(dev, "Failed to create dma pool");
2273  		return -ENOMEM;
2274  	}
2275  
2276  	return 0;
2277  }
2278  
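/*
 * Request and configure the rx1/rx2/tx DMA channels; on failure the channels
 * acquired so far are released in reverse order.
 */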
2279  static int sa_dma_init(struct sa_crypto_data *dd)
2280  {
2281  	int ret;
2282  	struct dma_slave_config cfg;
2283  
2284  	dd->dma_rx1 = NULL;
2285  	dd->dma_tx = NULL;
2286  	dd->dma_rx2 = NULL;
2287  
2288  	ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2289  	if (ret)
2290  		return ret;
2291  
2292  	dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2293  	if (IS_ERR(dd->dma_rx1))
2294  		return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
2295  				     "Unable to request rx1 DMA channel\n");
2296  
2297  	dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2298  	if (IS_ERR(dd->dma_rx2)) {
2299  		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
2300  				    "Unable to request rx2 DMA channel\n");
2301  		goto err_dma_rx2;
2302  	}
2303  
2304  	dd->dma_tx = dma_request_chan(dd->dev, "tx");
2305  	if (IS_ERR(dd->dma_tx)) {
2306  		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
2307  				    "Unable to request tx DMA channel\n");
2308  		goto err_dma_tx;
2309  	}
2310  
2311  	memzero_explicit(&cfg, sizeof(cfg));
2312  
2313  	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2314  	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2315  	cfg.src_maxburst = 4;
2316  	cfg.dst_maxburst = 4;
2317  
2318  	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
2319  	if (ret) {
2320  		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2321  			ret);
2322  		goto err_dma_config;
2323  	}
2324  
2325  	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
2326  	if (ret) {
2327  		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2328  			ret);
2329  		goto err_dma_config;
2330  	}
2331  
2332  	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
2333  	if (ret) {
2334  		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
2335  			ret);
2336  		goto err_dma_config;
2337  	}
2338  
2339  	return 0;
2340  
2341  err_dma_config:
2342  	dma_release_channel(dd->dma_tx);
2343  err_dma_tx:
2344  	dma_release_channel(dd->dma_rx2);
2345  err_dma_rx2:
2346  	dma_release_channel(dd->dma_rx1);
2347  
2348  	return ret;
2349  }
2350  
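/* Make each child device a consumer of the SA2UL parent so it probes after it */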
2351  static int sa_link_child(struct device *dev, void *data)
2352  {
2353  	struct device *parent = data;
2354  
2355  	device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2356  
2357  	return 0;
2358  }
2359  
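/* Per-SoC match data: priv/priv_id values and the set of advertised algorithms */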
2360  static struct sa_match_data am654_match_data = {
2361  	.priv = 1,
2362  	.priv_id = 1,
2363  	.supported_algos = BIT(SA_ALG_CBC_AES) |
2364  			   BIT(SA_ALG_EBC_AES) |
2365  			   BIT(SA_ALG_CBC_DES3) |
2366  			   BIT(SA_ALG_ECB_DES3) |
2367  			   BIT(SA_ALG_SHA1) |
2368  			   BIT(SA_ALG_SHA256) |
2369  			   BIT(SA_ALG_SHA512) |
2370  			   BIT(SA_ALG_AUTHENC_SHA1_AES) |
2371  			   BIT(SA_ALG_AUTHENC_SHA256_AES),
2372  };
2373  
2374  static struct sa_match_data am64_match_data = {
2375  	.priv = 0,
2376  	.priv_id = 0,
2377  	.supported_algos = BIT(SA_ALG_CBC_AES) |
2378  			   BIT(SA_ALG_EBC_AES) |
2379  			   BIT(SA_ALG_SHA256) |
2380  			   BIT(SA_ALG_SHA512) |
2381  			   BIT(SA_ALG_AUTHENC_SHA256_AES),
2382  };
2383  
2384  static const struct of_device_id of_match[] = {
2385  	{ .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
2386  	{ .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
2387  	{ .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
2388  	{ .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
2389  	{},
2390  };
2391  MODULE_DEVICE_TABLE(of, of_match);
2392  
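/*
 * Probe: map the register space, enable runtime PM, set up the DMA pool and
 * channels, enable any SA engines not already running, register the crypto
 * algorithms and populate child devices from the device tree node.
 */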
2393  static int sa_ul_probe(struct platform_device *pdev)
2394  {
2395  	struct device *dev = &pdev->dev;
2396  	struct device_node *node = dev->of_node;
2397  	static void __iomem *saul_base;
2398  	struct sa_crypto_data *dev_data;
2399  	u32 status, val;
2400  	int ret;
2401  
2402  	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2403  	if (!dev_data)
2404  		return -ENOMEM;
2405  
2406  	dev_data->match_data = of_device_get_match_data(dev);
2407  	if (!dev_data->match_data)
2408  		return -ENODEV;
2409  
2410  	saul_base = devm_platform_ioremap_resource(pdev, 0);
2411  	if (IS_ERR(saul_base))
2412  		return PTR_ERR(saul_base);
2413  
2414  	sa_k3_dev = dev;
2415  	dev_data->dev = dev;
2416  	dev_data->pdev = pdev;
2417  	dev_data->base = saul_base;
2418  	platform_set_drvdata(pdev, dev_data);
2419  	dev_set_drvdata(sa_k3_dev, dev_data);
2420  
2421  	pm_runtime_enable(dev);
2422  	ret = pm_runtime_resume_and_get(dev);
2423  	if (ret < 0) {
2424  		dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
2425  		pm_runtime_disable(dev);
2426  		return ret;
2427  	}
2428  
2429  	sa_init_mem(dev_data);
2430  	ret = sa_dma_init(dev_data);
2431  	if (ret)
2432  		goto destroy_dma_pool;
2433  
2434  	spin_lock_init(&dev_data->scid_lock);
2435  
2436  	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2437  	      SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2438  	      SA_EEC_TRNG_EN;
2439  	status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
2440  	/* Only enable engines if all are not already enabled */
2441  	if (val & ~status)
2442  		writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2443  
2444  	sa_register_algos(dev_data);
2445  
2446  	ret = of_platform_populate(node, NULL, NULL, dev);
2447  	if (ret)
2448  		goto release_dma;
2449  
2450  	device_for_each_child(dev, dev, sa_link_child);
2451  
2452  	return 0;
2453  
2454  release_dma:
2455  	sa_unregister_algos(dev);
2456  
2457  	dma_release_channel(dev_data->dma_rx2);
2458  	dma_release_channel(dev_data->dma_rx1);
2459  	dma_release_channel(dev_data->dma_tx);
2460  
2461  destroy_dma_pool:
2462  	dma_pool_destroy(dev_data->sc_pool);
2463  
2464  	pm_runtime_put_sync(dev);
2465  	pm_runtime_disable(dev);
2466  
2467  	return ret;
2468  }
2469  
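/*
 * Remove: depopulate child devices, unregister the algorithms, release the
 * DMA channels and pool, then drop the runtime PM reference.
 */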
2470  static int sa_ul_remove(struct platform_device *pdev)
2471  {
2472  	struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2473  
2474  	of_platform_depopulate(&pdev->dev);
2475  
2476  	sa_unregister_algos(&pdev->dev);
2477  
2478  	dma_release_channel(dev_data->dma_rx2);
2479  	dma_release_channel(dev_data->dma_rx1);
2480  	dma_release_channel(dev_data->dma_tx);
2481  
2482  	dma_pool_destroy(dev_data->sc_pool);
2483  
2484  	platform_set_drvdata(pdev, NULL);
2485  
2486  	pm_runtime_put_sync(&pdev->dev);
2487  	pm_runtime_disable(&pdev->dev);
2488  
2489  	return 0;
2490  }
2491  
2492  static struct platform_driver sa_ul_driver = {
2493  	.probe = sa_ul_probe,
2494  	.remove = sa_ul_remove,
2495  	.driver = {
2496  		   .name = "saul-crypto",
2497  		   .of_match_table = of_match,
2498  		   },
2499  };
2500  module_platform_driver(sa_ul_driver);
2501  MODULE_LICENSE("GPL v2");
2502