xref: /openbmc/linux/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Intel IXP4xx NPE-C crypto driver
4   *
5   * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
6   */
7  
8  #include <linux/platform_device.h>
9  #include <linux/dma-mapping.h>
10  #include <linux/dmapool.h>
11  #include <linux/crypto.h>
12  #include <linux/kernel.h>
13  #include <linux/rtnetlink.h>
14  #include <linux/interrupt.h>
15  #include <linux/spinlock.h>
16  #include <linux/gfp.h>
17  #include <linux/module.h>
18  #include <linux/of.h>
19  
20  #include <crypto/ctr.h>
21  #include <crypto/internal/des.h>
22  #include <crypto/aes.h>
23  #include <crypto/hmac.h>
24  #include <crypto/sha1.h>
25  #include <crypto/algapi.h>
26  #include <crypto/internal/aead.h>
27  #include <crypto/internal/skcipher.h>
28  #include <crypto/authenc.h>
29  #include <crypto/scatterwalk.h>
30  
31  #include <linux/soc/ixp4xx/npe.h>
32  #include <linux/soc/ixp4xx/qmgr.h>
33  
34  /* Intermittent includes, delete this after v5.14-rc1 */
35  #include <linux/soc/ixp4xx/cpu.h>
36  
37  #define MAX_KEYLEN 32
38  
39  /* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
40  #define NPE_CTX_LEN 80
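/*
 * Illustrative sizing (editorial note, not in the original source): the worst
 * case is AES-256 plus HMAC-SHA1: crypt part = 4 (cfgword) + 32 (key) = 36
 * bytes, hash part = 4 (cfgword) + 2 * 20 (SHA1 digests) = 44 bytes,
 * 36 + 44 = 80 bytes.
 */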
41  #define AES_BLOCK128 16
42  
43  #define NPE_OP_HASH_VERIFY   0x01
44  #define NPE_OP_CCM_ENABLE    0x04
45  #define NPE_OP_CRYPT_ENABLE  0x08
46  #define NPE_OP_HASH_ENABLE   0x10
47  #define NPE_OP_NOT_IN_PLACE  0x20
48  #define NPE_OP_HMAC_DISABLE  0x40
49  #define NPE_OP_CRYPT_ENCRYPT 0x80
50  
51  #define NPE_OP_CCM_GEN_MIC   0xcc
52  #define NPE_OP_HASH_GEN_ICV  0x50
53  #define NPE_OP_ENC_GEN_KEY   0xc9
54  
55  #define MOD_ECB     0x0000
56  #define MOD_CTR     0x1000
57  #define MOD_CBC_ENC 0x2000
58  #define MOD_CBC_DEC 0x3000
59  #define MOD_CCM_ENC 0x4000
60  #define MOD_CCM_DEC 0x5000
61  
62  #define KEYLEN_128  4
63  #define KEYLEN_192  6
64  #define KEYLEN_256  8
65  
66  #define CIPH_DECR   0x0000
67  #define CIPH_ENCR   0x0400
68  
69  #define MOD_DES     0x0000
70  #define MOD_TDEA2   0x0100
71  #define MOD_3DES    0x0200
72  #define MOD_AES     0x0800
73  #define MOD_AES128  (0x0800 | KEYLEN_128)
74  #define MOD_AES192  (0x0900 | KEYLEN_192)
75  #define MOD_AES256  (0x0a00 | KEYLEN_256)
76  
77  #define MAX_IVLEN   16
78  #define NPE_QLEN    16
79  /* Space for registration requests issued while the first
80   * NPE_QLEN crypt_ctl descriptors are busy */
81  #define NPE_QLEN_TOTAL 64
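/*
 * Note (editorial): get_crypt_desc() cycles through descriptor slots
 * 0..NPE_QLEN-1 for regular requests, while get_crypt_desc_emerg() cycles
 * through slots NPE_QLEN..NPE_QLEN_TOTAL-1, reserved for key/context setup
 * (register_chain_var(), gen_rev_aes_key()) so configuration can still make
 * progress when the regular slots are busy.
 */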
82  
83  #define CTL_FLAG_UNUSED		0x0000
84  #define CTL_FLAG_USED		0x1000
85  #define CTL_FLAG_PERFORM_ABLK	0x0001
86  #define CTL_FLAG_GEN_ICV	0x0002
87  #define CTL_FLAG_GEN_REVAES	0x0004
88  #define CTL_FLAG_PERFORM_AEAD	0x0008
89  #define CTL_FLAG_MASK		0x000f
90  
91  #define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
92  
93  #define MD5_DIGEST_SIZE   16
94  
95  struct buffer_desc {
96  	u32 phys_next;
97  #ifdef __ARMEB__
98  	u16 buf_len;
99  	u16 pkt_len;
100  #else
101  	u16 pkt_len;
102  	u16 buf_len;
103  #endif
104  	dma_addr_t phys_addr;
105  	u32 __reserved[4];
106  	struct buffer_desc *next;
107  	enum dma_data_direction dir;
108  };
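/*
 * Note (editorial, inferred from how the fields are used): phys_next, the
 * length pair and phys_addr describe the buffer chain handed to the NPE,
 * while "next" and "dir" are host-side bookkeeping consumed by
 * free_buf_chain().
 */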
109  
110  struct crypt_ctl {
111  #ifdef __ARMEB__
112  	u8 mode;		/* NPE_OP_*  operation mode */
113  	u8 init_len;
114  	u16 reserved;
115  #else
116  	u16 reserved;
117  	u8 init_len;
118  	u8 mode;		/* NPE_OP_*  operation mode */
119  #endif
120  	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
121  	u32 icv_rev_aes;	/* icv or rev aes */
122  	u32 src_buf;
123  	u32 dst_buf;
124  #ifdef __ARMEB__
125  	u16 auth_offs;		/* Authentication start offset */
126  	u16 auth_len;		/* Authentication data length */
127  	u16 crypt_offs;		/* Cryption start offset */
128  	u16 crypt_len;		/* Cryption data length */
129  #else
130  	u16 auth_len;		/* Authentication data length */
131  	u16 auth_offs;		/* Authentication start offset */
132  	u16 crypt_len;		/* Cryption data length */
133  	u16 crypt_offs;		/* Cryption start offset */
134  #endif
135  	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
136  	u32 crypto_ctx;		/* NPE Crypto Param structure address */
137  
138  	/* Used by Host: 4*4 bytes*/
139  	unsigned int ctl_flags;
140  	union {
141  		struct skcipher_request *ablk_req;
142  		struct aead_request *aead_req;
143  		struct crypto_tfm *tfm;
144  	} data;
145  	struct buffer_desc *regist_buf;
146  	u8 *regist_ptr;
147  };
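/*
 * Note (editorial): on the 32-bit IXP4xx target this descriptor is 64 bytes
 * (enforced by the BUILD_BUG_ON() in setup_crypt_desc()); the leading 48
 * bytes are consumed by the NPE, and the trailing 16 bytes ("Used by Host"
 * above) are host-only bookkeeping.
 */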
148  
149  struct ablk_ctx {
150  	struct buffer_desc *src;
151  	struct buffer_desc *dst;
152  	u8 iv[MAX_IVLEN];
153  	bool encrypt;
154  	struct skcipher_request fallback_req;   // keep at the end
155  };
156  
157  struct aead_ctx {
158  	struct buffer_desc *src;
159  	struct buffer_desc *dst;
160  	struct scatterlist ivlist;
161  	/* used when the hmac is not on one sg entry */
162  	u8 *hmac_virt;
163  	int encrypt;
164  };
165  
166  struct ix_hash_algo {
167  	u32 cfgword;
168  	unsigned char *icv;
169  };
170  
171  struct ix_sa_dir {
172  	unsigned char *npe_ctx;
173  	dma_addr_t npe_ctx_phys;
174  	int npe_ctx_idx;
175  	u8 npe_mode;
176  };
177  
178  struct ixp_ctx {
179  	struct ix_sa_dir encrypt;
180  	struct ix_sa_dir decrypt;
181  	int authkey_len;
182  	u8 authkey[MAX_KEYLEN];
183  	int enckey_len;
184  	u8 enckey[MAX_KEYLEN];
185  	u8 salt[MAX_IVLEN];
186  	u8 nonce[CTR_RFC3686_NONCE_SIZE];
187  	unsigned int salted;
188  	atomic_t configuring;
189  	struct completion completion;
190  	struct crypto_skcipher *fallback_tfm;
191  };
192  
193  struct ixp_alg {
194  	struct skcipher_alg crypto;
195  	const struct ix_hash_algo *hash;
196  	u32 cfg_enc;
197  	u32 cfg_dec;
198  
199  	int registered;
200  };
201  
202  struct ixp_aead_alg {
203  	struct aead_alg crypto;
204  	const struct ix_hash_algo *hash;
205  	u32 cfg_enc;
206  	u32 cfg_dec;
207  
208  	int registered;
209  };
210  
211  static const struct ix_hash_algo hash_alg_md5 = {
212  	.cfgword	= 0xAA010004,
213  	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
214  			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
215  };
216  
217  static const struct ix_hash_algo hash_alg_sha1 = {
218  	.cfgword	= 0x00000005,
219  	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
220  			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
221  };
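/*
 * Note (editorial): the ->icv strings above are the standard MD5 and SHA-1
 * initial chaining values (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476,
 * plus 0xc3d2e1f0 for SHA-1); the MD5 words are stored little-endian, the
 * SHA-1 words big-endian.
 */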
222  
223  static struct npe *npe_c;
224  
225  static unsigned int send_qid;
226  static unsigned int recv_qid;
227  static struct dma_pool *buffer_pool;
228  static struct dma_pool *ctx_pool;
229  
230  static struct crypt_ctl *crypt_virt;
231  static dma_addr_t crypt_phys;
232  
233  static int support_aes = 1;
234  
235  static struct platform_device *pdev;
236  
237  static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
238  {
239  	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
240  }
241  
242  static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
243  {
244  	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
245  }
246  
247  static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
248  {
249  	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
250  }
251  
252  static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
253  {
254  	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
255  }
256  
257  static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
258  {
259  	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
260  }
261  
262  static int setup_crypt_desc(void)
263  {
264  	struct device *dev = &pdev->dev;
265  
266  	BUILD_BUG_ON(!(IS_ENABLED(CONFIG_COMPILE_TEST) &&
267  		       IS_ENABLED(CONFIG_64BIT)) &&
268  		     sizeof(struct crypt_ctl) != 64);
269  	crypt_virt = dma_alloc_coherent(dev,
270  					NPE_QLEN * sizeof(struct crypt_ctl),
271  					&crypt_phys, GFP_ATOMIC);
272  	if (!crypt_virt)
273  		return -ENOMEM;
274  	return 0;
275  }
276  
277  static DEFINE_SPINLOCK(desc_lock);
278  static struct crypt_ctl *get_crypt_desc(void)
279  {
280  	int i;
281  	static int idx;
282  	unsigned long flags;
283  
284  	spin_lock_irqsave(&desc_lock, flags);
285  
286  	if (unlikely(!crypt_virt))
287  		setup_crypt_desc();
288  	if (unlikely(!crypt_virt)) {
289  		spin_unlock_irqrestore(&desc_lock, flags);
290  		return NULL;
291  	}
292  	i = idx;
293  	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
294  		if (++idx >= NPE_QLEN)
295  			idx = 0;
296  		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
297  		spin_unlock_irqrestore(&desc_lock, flags);
298  		return crypt_virt + i;
299  	} else {
300  		spin_unlock_irqrestore(&desc_lock, flags);
301  		return NULL;
302  	}
303  }
304  
305  static DEFINE_SPINLOCK(emerg_lock);
306  static struct crypt_ctl *get_crypt_desc_emerg(void)
307  {
308  	int i;
309  	static int idx = NPE_QLEN;
310  	struct crypt_ctl *desc;
311  	unsigned long flags;
312  
313  	desc = get_crypt_desc();
314  	if (desc)
315  		return desc;
316  	if (unlikely(!crypt_virt))
317  		return NULL;
318  
319  	spin_lock_irqsave(&emerg_lock, flags);
320  	i = idx;
321  	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
322  		if (++idx >= NPE_QLEN_TOTAL)
323  			idx = NPE_QLEN;
324  		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
325  		spin_unlock_irqrestore(&emerg_lock, flags);
326  		return crypt_virt + i;
327  	} else {
328  		spin_unlock_irqrestore(&emerg_lock, flags);
329  		return NULL;
330  	}
331  }
332  
333  static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
334  			   dma_addr_t phys)
335  {
336  	while (buf) {
337  		struct buffer_desc *buf1;
338  		u32 phys1;
339  
340  		buf1 = buf->next;
341  		phys1 = buf->phys_next;
342  		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
343  		dma_pool_free(buffer_pool, buf, phys);
344  		buf = buf1;
345  		phys = phys1;
346  	}
347  }
348  
349  static struct tasklet_struct crypto_done_tasklet;
350  
351  static void finish_scattered_hmac(struct crypt_ctl *crypt)
352  {
353  	struct aead_request *req = crypt->data.aead_req;
354  	struct aead_ctx *req_ctx = aead_request_ctx(req);
355  	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
356  	int authsize = crypto_aead_authsize(tfm);
357  	int decryptlen = req->assoclen + req->cryptlen - authsize;
358  
359  	if (req_ctx->encrypt) {
360  		scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
361  					 decryptlen, authsize, 1);
362  	}
363  	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
364  }
365  
366  static void one_packet(dma_addr_t phys)
367  {
368  	struct device *dev = &pdev->dev;
369  	struct crypt_ctl *crypt;
370  	struct ixp_ctx *ctx;
371  	int failed;
372  
373  	failed = phys & 0x1 ? -EBADMSG : 0;
374  	phys &= ~0x3;
375  	crypt = crypt_phys2virt(phys);
376  
377  	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
378  	case CTL_FLAG_PERFORM_AEAD: {
379  		struct aead_request *req = crypt->data.aead_req;
380  		struct aead_ctx *req_ctx = aead_request_ctx(req);
381  
382  		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
383  		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
384  		if (req_ctx->hmac_virt)
385  			finish_scattered_hmac(crypt);
386  
387  		aead_request_complete(req, failed);
388  		break;
389  	}
390  	case CTL_FLAG_PERFORM_ABLK: {
391  		struct skcipher_request *req = crypt->data.ablk_req;
392  		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
393  		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
394  		unsigned int ivsize = crypto_skcipher_ivsize(tfm);
395  		unsigned int offset;
396  
397  		if (ivsize > 0) {
398  			offset = req->cryptlen - ivsize;
399  			if (req_ctx->encrypt) {
400  				scatterwalk_map_and_copy(req->iv, req->dst,
401  							 offset, ivsize, 0);
402  			} else {
403  				memcpy(req->iv, req_ctx->iv, ivsize);
404  				memzero_explicit(req_ctx->iv, ivsize);
405  			}
406  		}
407  
408  		if (req_ctx->dst)
409  			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
410  
411  		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
412  		skcipher_request_complete(req, failed);
413  		break;
414  	}
415  	case CTL_FLAG_GEN_ICV:
416  		ctx = crypto_tfm_ctx(crypt->data.tfm);
417  		dma_pool_free(ctx_pool, crypt->regist_ptr,
418  			      crypt->regist_buf->phys_addr);
419  		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
420  		if (atomic_dec_and_test(&ctx->configuring))
421  			complete(&ctx->completion);
422  		break;
423  	case CTL_FLAG_GEN_REVAES:
424  		ctx = crypto_tfm_ctx(crypt->data.tfm);
425  		*(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
426  		if (atomic_dec_and_test(&ctx->configuring))
427  			complete(&ctx->completion);
428  		break;
429  	default:
430  		BUG();
431  	}
432  	crypt->ctl_flags = CTL_FLAG_UNUSED;
433  }
434  
435  static void irqhandler(void *_unused)
436  {
437  	tasklet_schedule(&crypto_done_tasklet);
438  }
439  
440  static void crypto_done_action(unsigned long arg)
441  {
442  	int i;
443  
444  	for (i = 0; i < 4; i++) {
445  		dma_addr_t phys = qmgr_get_entry(recv_qid);
446  		if (!phys)
447  			return;
448  		one_packet(phys);
449  	}
450  	tasklet_schedule(&crypto_done_tasklet);
451  }
452  
453  static int init_ixp_crypto(struct device *dev)
454  {
455  	struct device_node *np = dev->of_node;
456  	u32 msg[2] = { 0, 0 };
457  	int ret = -ENODEV;
458  	u32 npe_id;
459  
460  	dev_info(dev, "probing...\n");
461  
462  	/* Locate the NPE and queue manager to use from device tree */
463  	if (IS_ENABLED(CONFIG_OF) && np) {
464  		struct of_phandle_args queue_spec;
465  		struct of_phandle_args npe_spec;
466  
467  		ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
468  						       1, 0, &npe_spec);
469  		if (ret) {
470  			dev_err(dev, "no NPE engine specified\n");
471  			return -ENODEV;
472  		}
473  		npe_id = npe_spec.args[0];
474  
475  		ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
476  						       &queue_spec);
477  		if (ret) {
478  			dev_err(dev, "no rx queue phandle\n");
479  			return -ENODEV;
480  		}
481  		recv_qid = queue_spec.args[0];
482  
483  		ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
484  						       &queue_spec);
485  		if (ret) {
486  			dev_err(dev, "no txready queue phandle\n");
487  			return -ENODEV;
488  		}
489  		send_qid = queue_spec.args[0];
490  	} else {
491  		/*
492  		 * Hardcoded engine when using platform data, this goes away
493  		 * when we switch to using DT only.
494  		 */
495  		npe_id = 2;
496  		send_qid = 29;
497  		recv_qid = 30;
498  	}
499  
500  	npe_c = npe_request(npe_id);
501  	if (!npe_c)
502  		return ret;
503  
504  	if (!npe_running(npe_c)) {
505  		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
506  		if (ret)
507  			goto npe_release;
508  		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
509  			goto npe_error;
510  	} else {
511  		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
512  			goto npe_error;
513  
514  		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
515  			goto npe_error;
516  	}
517  
518  	switch ((msg[1] >> 16) & 0xff) {
519  	case 3:
520  		dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
521  		support_aes = 0;
522  		break;
523  	case 4:
524  	case 5:
525  		support_aes = 1;
526  		break;
527  	default:
528  		dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
529  		ret = -ENODEV;
530  		goto npe_release;
531  	}
532  	/* buffer_pool will also sometimes be used to store the hmac,
533  	 * so ensure it is large enough
534  	 */
535  	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
536  	buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
537  				      32, 0);
538  	ret = -ENOMEM;
539  	if (!buffer_pool)
540  		goto err;
541  
542  	ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
543  	if (!ctx_pool)
544  		goto err;
545  
546  	ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
547  				 "ixp_crypto:out", NULL);
548  	if (ret)
549  		goto err;
550  	ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
551  				 "ixp_crypto:in", NULL);
552  	if (ret) {
553  		qmgr_release_queue(send_qid);
554  		goto err;
555  	}
556  	qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
557  	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
558  
559  	qmgr_enable_irq(recv_qid);
560  	return 0;
561  
562  npe_error:
563  	dev_err(dev, "%s not responding\n", npe_name(npe_c));
564  	ret = -EIO;
565  err:
566  	dma_pool_destroy(ctx_pool);
567  	dma_pool_destroy(buffer_pool);
568  npe_release:
569  	npe_release(npe_c);
570  	return ret;
571  }
572  
573  static void release_ixp_crypto(struct device *dev)
574  {
575  	qmgr_disable_irq(recv_qid);
576  	tasklet_kill(&crypto_done_tasklet);
577  
578  	qmgr_release_queue(send_qid);
579  	qmgr_release_queue(recv_qid);
580  
581  	dma_pool_destroy(ctx_pool);
582  	dma_pool_destroy(buffer_pool);
583  
584  	npe_release(npe_c);
585  
586  	if (crypt_virt)
587  		dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
588  				  crypt_virt, crypt_phys);
589  }
590  
591  static void reset_sa_dir(struct ix_sa_dir *dir)
592  {
593  	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
594  	dir->npe_ctx_idx = 0;
595  	dir->npe_mode = 0;
596  }
597  
598  static int init_sa_dir(struct ix_sa_dir *dir)
599  {
600  	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
601  	if (!dir->npe_ctx)
602  		return -ENOMEM;
603  
604  	reset_sa_dir(dir);
605  	return 0;
606  }
607  
608  static void free_sa_dir(struct ix_sa_dir *dir)
609  {
610  	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
611  	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
612  }
613  
614  static int init_tfm(struct crypto_tfm *tfm)
615  {
616  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
617  	int ret;
618  
619  	atomic_set(&ctx->configuring, 0);
620  	ret = init_sa_dir(&ctx->encrypt);
621  	if (ret)
622  		return ret;
623  	ret = init_sa_dir(&ctx->decrypt);
624  	if (ret)
625  		free_sa_dir(&ctx->encrypt);
626  
627  	return ret;
628  }
629  
630  static int init_tfm_ablk(struct crypto_skcipher *tfm)
631  {
632  	struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
633  	struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
634  	const char *name = crypto_tfm_alg_name(ctfm);
635  
636  	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
637  	if (IS_ERR(ctx->fallback_tfm)) {
638  		pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
639  			name, PTR_ERR(ctx->fallback_tfm));
640  		return PTR_ERR(ctx->fallback_tfm);
641  	}
642  
643  	pr_info("Fallback for %s is %s\n",
644  		 crypto_tfm_alg_driver_name(&tfm->base),
645  		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
646  		 );
647  
648  	crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
649  	return init_tfm(crypto_skcipher_tfm(tfm));
650  }
651  
652  static int init_tfm_aead(struct crypto_aead *tfm)
653  {
654  	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
655  	return init_tfm(crypto_aead_tfm(tfm));
656  }
657  
658  static void exit_tfm(struct crypto_tfm *tfm)
659  {
660  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
661  
662  	free_sa_dir(&ctx->encrypt);
663  	free_sa_dir(&ctx->decrypt);
664  }
665  
666  static void exit_tfm_ablk(struct crypto_skcipher *tfm)
667  {
668  	struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
669  	struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
670  
671  	crypto_free_skcipher(ctx->fallback_tfm);
672  	exit_tfm(crypto_skcipher_tfm(tfm));
673  }
674  
675  static void exit_tfm_aead(struct crypto_aead *tfm)
676  {
677  	exit_tfm(crypto_aead_tfm(tfm));
678  }
679  
680  static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
681  			      int init_len, u32 ctx_addr, const u8 *key,
682  			      int key_len)
683  {
684  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
685  	struct crypt_ctl *crypt;
686  	struct buffer_desc *buf;
687  	int i;
688  	u8 *pad;
689  	dma_addr_t pad_phys, buf_phys;
690  
691  	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
692  	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
693  	if (!pad)
694  		return -ENOMEM;
695  	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
696  	if (!buf) {
697  		dma_pool_free(ctx_pool, pad, pad_phys);
698  		return -ENOMEM;
699  	}
700  	crypt = get_crypt_desc_emerg();
701  	if (!crypt) {
702  		dma_pool_free(ctx_pool, pad, pad_phys);
703  		dma_pool_free(buffer_pool, buf, buf_phys);
704  		return -EAGAIN;
705  	}
706  
707  	memcpy(pad, key, key_len);
708  	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
709  	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
710  		pad[i] ^= xpad;
711  
712  	crypt->data.tfm = tfm;
713  	crypt->regist_ptr = pad;
714  	crypt->regist_buf = buf;
715  
716  	crypt->auth_offs = 0;
717  	crypt->auth_len = HMAC_PAD_BLOCKLEN;
718  	crypt->crypto_ctx = ctx_addr;
719  	crypt->src_buf = buf_phys;
720  	crypt->icv_rev_aes = target;
721  	crypt->mode = NPE_OP_HASH_GEN_ICV;
722  	crypt->init_len = init_len;
723  	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
724  
725  	buf->next = NULL;
726  	buf->buf_len = HMAC_PAD_BLOCKLEN;
727  	buf->pkt_len = 0;
728  	buf->phys_addr = pad_phys;
729  
730  	atomic_inc(&ctx->configuring);
731  	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
732  	BUG_ON(qmgr_stat_overflow(send_qid));
733  	return 0;
734  }
735  
736  static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
737  		      const u8 *key, int key_len, unsigned int digest_len)
738  {
739  	u32 itarget, otarget, npe_ctx_addr;
740  	unsigned char *cinfo;
741  	int init_len, ret = 0;
742  	u32 cfgword;
743  	struct ix_sa_dir *dir;
744  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
745  	const struct ix_hash_algo *algo;
746  
747  	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
748  	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
749  	algo = ix_hash(tfm);
750  
751  	/* write cfg word to cryptinfo */
752  	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
753  #ifndef __ARMEB__
754  	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
755  #endif
756  	*(__be32 *)cinfo = cpu_to_be32(cfgword);
757  	cinfo += sizeof(cfgword);
758  
759  	/* write ICV to cryptinfo */
760  	memcpy(cinfo, algo->icv, digest_len);
761  	cinfo += digest_len;
762  
763  	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
764  				+ sizeof(algo->cfgword);
765  	otarget = itarget + digest_len;
766  	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
767  	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
768  
769  	dir->npe_ctx_idx += init_len;
770  	dir->npe_mode |= NPE_OP_HASH_ENABLE;
771  
772  	if (!encrypt)
773  		dir->npe_mode |= NPE_OP_HASH_VERIFY;
774  
775  	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
776  				 init_len, npe_ctx_addr, key, key_len);
777  	if (ret)
778  		return ret;
779  	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
780  				  init_len, npe_ctx_addr, key, key_len);
781  }
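/*
 * Note (editorial): the above is the usual HMAC precomputation: the key is
 * padded, XORed with HMAC_IPAD_VALUE/HMAC_OPAD_VALUE and hashed once via
 * register_chain_var(); the NPE stores the resulting intermediate digests at
 * itarget/otarget inside the context, so per-request hashing resumes from
 * those states instead of reprocessing the key.
 */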
782  
783  static int gen_rev_aes_key(struct crypto_tfm *tfm)
784  {
785  	struct crypt_ctl *crypt;
786  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
787  	struct ix_sa_dir *dir = &ctx->decrypt;
788  
789  	crypt = get_crypt_desc_emerg();
790  	if (!crypt)
791  		return -EAGAIN;
792  
793  	*(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
794  
795  	crypt->data.tfm = tfm;
796  	crypt->crypt_offs = 0;
797  	crypt->crypt_len = AES_BLOCK128;
798  	crypt->src_buf = 0;
799  	crypt->crypto_ctx = dir->npe_ctx_phys;
800  	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
801  	crypt->mode = NPE_OP_ENC_GEN_KEY;
802  	crypt->init_len = dir->npe_ctx_idx;
803  	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
804  
805  	atomic_inc(&ctx->configuring);
806  	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
807  	BUG_ON(qmgr_stat_overflow(send_qid));
808  	return 0;
809  }
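/*
 * Note (editorial): AES decryption needs the reverse key schedule. The
 * decrypt context is temporarily marked CIPH_ENCR and an NPE_OP_ENC_GEN_KEY
 * operation is queued; the NPE deposits the reverse key material at
 * npe_ctx_phys + sizeof(u32), and one_packet() (CTL_FLAG_GEN_REVAES) clears
 * CIPH_ENCR again on completion.
 */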
810  
811  static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
812  			int key_len)
813  {
814  	u8 *cinfo;
815  	u32 cipher_cfg;
816  	u32 keylen_cfg = 0;
817  	struct ix_sa_dir *dir;
818  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
819  	int err;
820  
821  	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
822  	cinfo = dir->npe_ctx;
823  
824  	if (encrypt) {
825  		cipher_cfg = cipher_cfg_enc(tfm);
826  		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
827  	} else {
828  		cipher_cfg = cipher_cfg_dec(tfm);
829  	}
830  	if (cipher_cfg & MOD_AES) {
831  		switch (key_len) {
832  		case 16:
833  			keylen_cfg = MOD_AES128;
834  			break;
835  		case 24:
836  			keylen_cfg = MOD_AES192;
837  			break;
838  		case 32:
839  			keylen_cfg = MOD_AES256;
840  			break;
841  		default:
842  			return -EINVAL;
843  		}
844  		cipher_cfg |= keylen_cfg;
845  	} else {
846  		err = crypto_des_verify_key(tfm, key);
847  		if (err)
848  			return err;
849  	}
850  	/* write cfg word to cryptinfo */
851  	*(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
852  	cinfo += sizeof(cipher_cfg);
853  
854  	/* write cipher key to cryptinfo */
855  	memcpy(cinfo, key, key_len);
856  	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
857  	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
858  		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
859  		key_len = DES3_EDE_KEY_SIZE;
860  	}
861  	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
862  	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
863  	if ((cipher_cfg & MOD_AES) && !encrypt)
864  		return gen_rev_aes_key(tfm);
865  
866  	return 0;
867  }
868  
869  static struct buffer_desc *chainup_buffers(struct device *dev,
870  		struct scatterlist *sg,	unsigned int nbytes,
871  		struct buffer_desc *buf, gfp_t flags,
872  		enum dma_data_direction dir)
873  {
874  	for (; nbytes > 0; sg = sg_next(sg)) {
875  		unsigned int len = min(nbytes, sg->length);
876  		struct buffer_desc *next_buf;
877  		dma_addr_t next_buf_phys;
878  		void *ptr;
879  
880  		nbytes -= len;
881  		ptr = sg_virt(sg);
882  		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
883  		if (!next_buf) {
884  			buf = NULL;
885  			break;
886  		}
887  		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
888  		buf->next = next_buf;
889  		buf->phys_next = next_buf_phys;
890  		buf = next_buf;
891  
892  		buf->phys_addr = sg_dma_address(sg);
893  		buf->buf_len = len;
894  		buf->dir = dir;
895  	}
896  	buf->next = NULL;
897  	buf->phys_next = 0;
898  	return buf;
899  }
900  
901  static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
902  		       unsigned int key_len)
903  {
904  	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
905  	int ret;
906  
907  	init_completion(&ctx->completion);
908  	atomic_inc(&ctx->configuring);
909  
910  	reset_sa_dir(&ctx->encrypt);
911  	reset_sa_dir(&ctx->decrypt);
912  
913  	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
914  	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
915  
916  	ret = setup_cipher(&tfm->base, 0, key, key_len);
917  	if (ret)
918  		goto out;
919  	ret = setup_cipher(&tfm->base, 1, key, key_len);
920  out:
921  	if (!atomic_dec_and_test(&ctx->configuring))
922  		wait_for_completion(&ctx->completion);
923  	if (ret)
924  		return ret;
925  	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
926  	crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
927  
928  	return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
929  }
930  
931  static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
932  			    unsigned int key_len)
933  {
934  	return verify_skcipher_des3_key(tfm, key) ?:
935  	       ablk_setkey(tfm, key, key_len);
936  }
937  
938  static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
939  			       unsigned int key_len)
940  {
941  	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
942  
943  	/* the nonce is stored in the last bytes of the key */
944  	if (key_len < CTR_RFC3686_NONCE_SIZE)
945  		return -EINVAL;
946  
947  	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
948  	       CTR_RFC3686_NONCE_SIZE);
949  
950  	key_len -= CTR_RFC3686_NONCE_SIZE;
951  	return ablk_setkey(tfm, key, key_len);
952  }
953  
954  static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
955  {
956  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
957  	struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
958  	struct ablk_ctx *rctx = skcipher_request_ctx(areq);
959  	int err;
960  
961  	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
962  	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
963  				      areq->base.complete, areq->base.data);
964  	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
965  				   areq->cryptlen, areq->iv);
966  	if (encrypt)
967  		err = crypto_skcipher_encrypt(&rctx->fallback_req);
968  	else
969  		err = crypto_skcipher_decrypt(&rctx->fallback_req);
970  	return err;
971  }
972  
973  static int ablk_perform(struct skcipher_request *req, int encrypt)
974  {
975  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
976  	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
977  	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
978  	struct ix_sa_dir *dir;
979  	struct crypt_ctl *crypt;
980  	unsigned int nbytes = req->cryptlen;
981  	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
982  	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
983  	struct buffer_desc src_hook;
984  	struct device *dev = &pdev->dev;
985  	unsigned int offset;
986  	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
987  				GFP_KERNEL : GFP_ATOMIC;
988  
989  	if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
990  		return ixp4xx_cipher_fallback(req, encrypt);
991  
992  	if (qmgr_stat_full(send_qid))
993  		return -EAGAIN;
994  	if (atomic_read(&ctx->configuring))
995  		return -EAGAIN;
996  
997  	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
998  	req_ctx->encrypt = encrypt;
999  
1000  	crypt = get_crypt_desc();
1001  	if (!crypt)
1002  		return -ENOMEM;
1003  
1004  	crypt->data.ablk_req = req;
1005  	crypt->crypto_ctx = dir->npe_ctx_phys;
1006  	crypt->mode = dir->npe_mode;
1007  	crypt->init_len = dir->npe_ctx_idx;
1008  
1009  	crypt->crypt_offs = 0;
1010  	crypt->crypt_len = nbytes;
1011  
1012  	BUG_ON(ivsize && !req->iv);
1013  	memcpy(crypt->iv, req->iv, ivsize);
1014  	if (ivsize > 0 && !encrypt) {
1015  		offset = req->cryptlen - ivsize;
1016  		scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
1017  	}
1018  	if (req->src != req->dst) {
1019  		struct buffer_desc dst_hook;
1020  
1021  		crypt->mode |= NPE_OP_NOT_IN_PLACE;
1022  		/* This was never tested by Intel
1023  		 * for more than one dst buffer, I think. */
1024  		req_ctx->dst = NULL;
1025  		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
1026  				     flags, DMA_FROM_DEVICE))
1027  			goto free_buf_dest;
1028  		src_direction = DMA_TO_DEVICE;
1029  		req_ctx->dst = dst_hook.next;
1030  		crypt->dst_buf = dst_hook.phys_next;
1031  	} else {
1032  		req_ctx->dst = NULL;
1033  	}
1034  	req_ctx->src = NULL;
1035  	if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
1036  			     src_direction))
1037  		goto free_buf_src;
1038  
1039  	req_ctx->src = src_hook.next;
1040  	crypt->src_buf = src_hook.phys_next;
1041  	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
1042  	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
1043  	BUG_ON(qmgr_stat_overflow(send_qid));
1044  	return -EINPROGRESS;
1045  
1046  free_buf_src:
1047  	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
1048  free_buf_dest:
1049  	if (req->src != req->dst)
1050  		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
1051  
1052  	crypt->ctl_flags = CTL_FLAG_UNUSED;
1053  	return -ENOMEM;
1054  }
1055  
1056  static int ablk_encrypt(struct skcipher_request *req)
1057  {
1058  	return ablk_perform(req, 1);
1059  }
1060  
1061  static int ablk_decrypt(struct skcipher_request *req)
1062  {
1063  	return ablk_perform(req, 0);
1064  }
1065  
1066  static int ablk_rfc3686_crypt(struct skcipher_request *req)
1067  {
1068  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1069  	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
1070  	u8 iv[CTR_RFC3686_BLOCK_SIZE];
1071  	u8 *info = req->iv;
1072  	int ret;
1073  
1074  	/* set up counter block */
1075  	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
1076  	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
1077  
1078  	/* initialize counter portion of counter block */
1079  	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
1080  		cpu_to_be32(1);
1081  
1082  	req->iv = iv;
1083  	ret = ablk_perform(req, 1);
1084  	req->iv = info;
1085  	return ret;
1086  }
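/*
 * Illustrative layout of the RFC 3686 counter block built above (editorial
 * note):
 *
 *	bytes  0..3	nonce (last 4 bytes of the key, see ablk_rfc3686_setkey)
 *	bytes  4..11	per-request IV (req->iv)
 *	bytes 12..15	32-bit big-endian block counter, initialised to 1
 */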
1087  
1088  static int aead_perform(struct aead_request *req, int encrypt,
1089  			int cryptoffset, int eff_cryptlen, u8 *iv)
1090  {
1091  	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1092  	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1093  	unsigned int ivsize = crypto_aead_ivsize(tfm);
1094  	unsigned int authsize = crypto_aead_authsize(tfm);
1095  	struct ix_sa_dir *dir;
1096  	struct crypt_ctl *crypt;
1097  	unsigned int cryptlen;
1098  	struct buffer_desc *buf, src_hook;
1099  	struct aead_ctx *req_ctx = aead_request_ctx(req);
1100  	struct device *dev = &pdev->dev;
1101  	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1102  				GFP_KERNEL : GFP_ATOMIC;
1103  	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
1104  	unsigned int lastlen;
1105  
1106  	if (qmgr_stat_full(send_qid))
1107  		return -EAGAIN;
1108  	if (atomic_read(&ctx->configuring))
1109  		return -EAGAIN;
1110  
1111  	if (encrypt) {
1112  		dir = &ctx->encrypt;
1113  		cryptlen = req->cryptlen;
1114  	} else {
1115  		dir = &ctx->decrypt;
1116  		/* req->cryptlen includes the authsize when decrypting */
1117  		cryptlen = req->cryptlen - authsize;
1118  		eff_cryptlen -= authsize;
1119  	}
1120  	crypt = get_crypt_desc();
1121  	if (!crypt)
1122  		return -ENOMEM;
1123  
1124  	crypt->data.aead_req = req;
1125  	crypt->crypto_ctx = dir->npe_ctx_phys;
1126  	crypt->mode = dir->npe_mode;
1127  	crypt->init_len = dir->npe_ctx_idx;
1128  
1129  	crypt->crypt_offs = cryptoffset;
1130  	crypt->crypt_len = eff_cryptlen;
1131  
1132  	crypt->auth_offs = 0;
1133  	crypt->auth_len = req->assoclen + cryptlen;
1134  	BUG_ON(ivsize && !req->iv);
1135  	memcpy(crypt->iv, req->iv, ivsize);
1136  
1137  	buf = chainup_buffers(dev, req->src, crypt->auth_len,
1138  			      &src_hook, flags, src_direction);
1139  	req_ctx->src = src_hook.next;
1140  	crypt->src_buf = src_hook.phys_next;
1141  	if (!buf)
1142  		goto free_buf_src;
1143  
1144  	lastlen = buf->buf_len;
1145  	if (lastlen >= authsize)
1146  		crypt->icv_rev_aes = buf->phys_addr +
1147  				     buf->buf_len - authsize;
1148  
1149  	req_ctx->dst = NULL;
1150  
1151  	if (req->src != req->dst) {
1152  		struct buffer_desc dst_hook;
1153  
1154  		crypt->mode |= NPE_OP_NOT_IN_PLACE;
1155  		src_direction = DMA_TO_DEVICE;
1156  
1157  		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
1158  				      &dst_hook, flags, DMA_FROM_DEVICE);
1159  		req_ctx->dst = dst_hook.next;
1160  		crypt->dst_buf = dst_hook.phys_next;
1161  
1162  		if (!buf)
1163  			goto free_buf_dst;
1164  
1165  		if (encrypt) {
1166  			lastlen = buf->buf_len;
1167  			if (lastlen >= authsize)
1168  				crypt->icv_rev_aes = buf->phys_addr +
1169  						     buf->buf_len - authsize;
1170  		}
1171  	}
1172  
1173  	if (unlikely(lastlen < authsize)) {
1174  		dma_addr_t dma;
1175  		/* The hmac bytes are scattered across sg entries,
1176  		 * so copy them into a contiguous safe buffer */
1177  		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
1178  		if (unlikely(!req_ctx->hmac_virt))
1179  			goto free_buf_dst;
1180  		crypt->icv_rev_aes = dma;
1181  		if (!encrypt) {
1182  			scatterwalk_map_and_copy(req_ctx->hmac_virt,
1183  						 req->src, cryptlen, authsize, 0);
1184  		}
1185  		req_ctx->encrypt = encrypt;
1186  	} else {
1187  		req_ctx->hmac_virt = NULL;
1188  	}
1189  
1190  	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1191  	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
1192  	BUG_ON(qmgr_stat_overflow(send_qid));
1193  	return -EINPROGRESS;
1194  
1195  free_buf_dst:
1196  	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
1197  free_buf_src:
1198  	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
1199  	crypt->ctl_flags = CTL_FLAG_UNUSED;
1200  	return -ENOMEM;
1201  }
1202  
1203  static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1204  {
1205  	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1206  	unsigned int digest_len = crypto_aead_maxauthsize(tfm);
1207  	int ret;
1208  
1209  	if (!ctx->enckey_len && !ctx->authkey_len)
1210  		return 0;
1211  	init_completion(&ctx->completion);
1212  	atomic_inc(&ctx->configuring);
1213  
1214  	reset_sa_dir(&ctx->encrypt);
1215  	reset_sa_dir(&ctx->decrypt);
1216  
1217  	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1218  	if (ret)
1219  		goto out;
1220  	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1221  	if (ret)
1222  		goto out;
1223  	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1224  			 ctx->authkey_len, digest_len);
1225  	if (ret)
1226  		goto out;
1227  	ret = setup_auth(&tfm->base, 1, authsize,  ctx->authkey,
1228  			 ctx->authkey_len, digest_len);
1229  out:
1230  	if (!atomic_dec_and_test(&ctx->configuring))
1231  		wait_for_completion(&ctx->completion);
1232  	return ret;
1233  }
1234  
1235  static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1236  {
1237  	int max = crypto_aead_maxauthsize(tfm) >> 2;
1238  
1239  	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
1240  		return -EINVAL;
1241  	return aead_setup(tfm, authsize);
1242  }
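/*
 * Example (editorial): with hmac(sha1) the maximum authsize is 20 bytes, so
 * this accepts 4, 8, 12, 16 or 20; any value that is zero, not a multiple of
 * four, or above the maximum is rejected with -EINVAL.
 */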
1243  
1244  static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1245  		       unsigned int keylen)
1246  {
1247  	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1248  	struct crypto_authenc_keys keys;
1249  
1250  	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1251  		goto badkey;
1252  
1253  	if (keys.authkeylen > sizeof(ctx->authkey))
1254  		goto badkey;
1255  
1256  	if (keys.enckeylen > sizeof(ctx->enckey))
1257  		goto badkey;
1258  
1259  	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1260  	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1261  	ctx->authkey_len = keys.authkeylen;
1262  	ctx->enckey_len = keys.enckeylen;
1263  
1264  	memzero_explicit(&keys, sizeof(keys));
1265  	return aead_setup(tfm, crypto_aead_authsize(tfm));
1266  badkey:
1267  	memzero_explicit(&keys, sizeof(keys));
1268  	return -EINVAL;
1269  }
1270  
1271  static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1272  			    unsigned int keylen)
1273  {
1274  	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1275  	struct crypto_authenc_keys keys;
1276  	int err;
1277  
1278  	err = crypto_authenc_extractkeys(&keys, key, keylen);
1279  	if (unlikely(err))
1280  		goto badkey;
1281  
1282  	err = -EINVAL;
1283  	if (keys.authkeylen > sizeof(ctx->authkey))
1284  		goto badkey;
1285  
1286  	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
1287  	if (err)
1288  		goto badkey;
1289  
1290  	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1291  	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1292  	ctx->authkey_len = keys.authkeylen;
1293  	ctx->enckey_len = keys.enckeylen;
1294  
1295  	memzero_explicit(&keys, sizeof(keys));
1296  	return aead_setup(tfm, crypto_aead_authsize(tfm));
1297  badkey:
1298  	memzero_explicit(&keys, sizeof(keys));
1299  	return err;
1300  }
1301  
1302  static int aead_encrypt(struct aead_request *req)
1303  {
1304  	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
1305  }
1306  
1307  static int aead_decrypt(struct aead_request *req)
1308  {
1309  	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
1310  }
1311  
1312  static struct ixp_alg ixp4xx_algos[] = {
1313  {
1314  	.crypto	= {
1315  		.base.cra_name		= "cbc(des)",
1316  		.base.cra_blocksize	= DES_BLOCK_SIZE,
1317  
1318  		.min_keysize		= DES_KEY_SIZE,
1319  		.max_keysize		= DES_KEY_SIZE,
1320  		.ivsize			= DES_BLOCK_SIZE,
1321  	},
1322  	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1323  	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1324  
1325  }, {
1326  	.crypto	= {
1327  		.base.cra_name		= "ecb(des)",
1328  		.base.cra_blocksize	= DES_BLOCK_SIZE,
1329  		.min_keysize		= DES_KEY_SIZE,
1330  		.max_keysize		= DES_KEY_SIZE,
1331  	},
1332  	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1333  	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1334  }, {
1335  	.crypto	= {
1336  		.base.cra_name		= "cbc(des3_ede)",
1337  		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1338  
1339  		.min_keysize		= DES3_EDE_KEY_SIZE,
1340  		.max_keysize		= DES3_EDE_KEY_SIZE,
1341  		.ivsize			= DES3_EDE_BLOCK_SIZE,
1342  		.setkey			= ablk_des3_setkey,
1343  	},
1344  	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1345  	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1346  }, {
1347  	.crypto	= {
1348  		.base.cra_name		= "ecb(des3_ede)",
1349  		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1350  
1351  		.min_keysize		= DES3_EDE_KEY_SIZE,
1352  		.max_keysize		= DES3_EDE_KEY_SIZE,
1353  		.setkey			= ablk_des3_setkey,
1354  	},
1355  	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1356  	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1357  }, {
1358  	.crypto	= {
1359  		.base.cra_name		= "cbc(aes)",
1360  		.base.cra_blocksize	= AES_BLOCK_SIZE,
1361  
1362  		.min_keysize		= AES_MIN_KEY_SIZE,
1363  		.max_keysize		= AES_MAX_KEY_SIZE,
1364  		.ivsize			= AES_BLOCK_SIZE,
1365  	},
1366  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1367  	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1368  }, {
1369  	.crypto	= {
1370  		.base.cra_name		= "ecb(aes)",
1371  		.base.cra_blocksize	= AES_BLOCK_SIZE,
1372  
1373  		.min_keysize		= AES_MIN_KEY_SIZE,
1374  		.max_keysize		= AES_MAX_KEY_SIZE,
1375  	},
1376  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1377  	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1378  }, {
1379  	.crypto	= {
1380  		.base.cra_name		= "ctr(aes)",
1381  		.base.cra_blocksize	= 1,
1382  
1383  		.min_keysize		= AES_MIN_KEY_SIZE,
1384  		.max_keysize		= AES_MAX_KEY_SIZE,
1385  		.ivsize			= AES_BLOCK_SIZE,
1386  	},
1387  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1388  	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1389  }, {
1390  	.crypto	= {
1391  		.base.cra_name		= "rfc3686(ctr(aes))",
1392  		.base.cra_blocksize	= 1,
1393  
1394  		.min_keysize		= AES_MIN_KEY_SIZE,
1395  		.max_keysize		= AES_MAX_KEY_SIZE,
1396  		.ivsize			= AES_BLOCK_SIZE,
1397  		.setkey			= ablk_rfc3686_setkey,
1398  		.encrypt		= ablk_rfc3686_crypt,
1399  		.decrypt		= ablk_rfc3686_crypt,
1400  	},
1401  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1402  	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1403  } };
1404  
1405  static struct ixp_aead_alg ixp4xx_aeads[] = {
1406  {
1407  	.crypto	= {
1408  		.base = {
1409  			.cra_name	= "authenc(hmac(md5),cbc(des))",
1410  			.cra_blocksize	= DES_BLOCK_SIZE,
1411  		},
1412  		.ivsize		= DES_BLOCK_SIZE,
1413  		.maxauthsize	= MD5_DIGEST_SIZE,
1414  	},
1415  	.hash = &hash_alg_md5,
1416  	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1417  	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1418  }, {
1419  	.crypto	= {
1420  		.base = {
1421  			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
1422  			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1423  		},
1424  		.ivsize		= DES3_EDE_BLOCK_SIZE,
1425  		.maxauthsize	= MD5_DIGEST_SIZE,
1426  		.setkey		= des3_aead_setkey,
1427  	},
1428  	.hash = &hash_alg_md5,
1429  	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1430  	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1431  }, {
1432  	.crypto	= {
1433  		.base = {
1434  			.cra_name	= "authenc(hmac(sha1),cbc(des))",
1435  			.cra_blocksize	= DES_BLOCK_SIZE,
1436  		},
1437  			.ivsize		= DES_BLOCK_SIZE,
1438  			.maxauthsize	= SHA1_DIGEST_SIZE,
1439  	},
1440  	.hash = &hash_alg_sha1,
1441  	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1442  	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1443  }, {
1444  	.crypto	= {
1445  		.base = {
1446  			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
1447  			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1448  		},
1449  		.ivsize		= DES3_EDE_BLOCK_SIZE,
1450  		.maxauthsize	= SHA1_DIGEST_SIZE,
1451  		.setkey		= des3_aead_setkey,
1452  	},
1453  	.hash = &hash_alg_sha1,
1454  	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1455  	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1456  }, {
1457  	.crypto	= {
1458  		.base = {
1459  			.cra_name	= "authenc(hmac(md5),cbc(aes))",
1460  			.cra_blocksize	= AES_BLOCK_SIZE,
1461  		},
1462  		.ivsize		= AES_BLOCK_SIZE,
1463  		.maxauthsize	= MD5_DIGEST_SIZE,
1464  	},
1465  	.hash = &hash_alg_md5,
1466  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1467  	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1468  }, {
1469  	.crypto	= {
1470  		.base = {
1471  			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
1472  			.cra_blocksize	= AES_BLOCK_SIZE,
1473  		},
1474  		.ivsize		= AES_BLOCK_SIZE,
1475  		.maxauthsize	= SHA1_DIGEST_SIZE,
1476  	},
1477  	.hash = &hash_alg_sha1,
1478  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1479  	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1480  } };
1481  
1482  #define IXP_POSTFIX "-ixp4xx"
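/*
 * Illustrative usage (editorial sketch, not part of the original driver):
 * once probe has registered the algorithms above, kernel users reach them
 * through the generic crypto API, e.g.:
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		pr_info("using %s\n",
 *			crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));
 *
 * which reports "cbc(aes)-ixp4xx" when this driver (priority 300) wins the
 * algorithm selection.
 */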
1483  
1484  static int ixp_crypto_probe(struct platform_device *_pdev)
1485  {
1486  	struct device *dev = &_pdev->dev;
1487  	int num = ARRAY_SIZE(ixp4xx_algos);
1488  	int i, err;
1489  
1490  	pdev = _pdev;
1491  
1492  	err = init_ixp_crypto(dev);
1493  	if (err)
1494  		return err;
1495  
1496  	for (i = 0; i < num; i++) {
1497  		struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
1498  
1499  		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1500  			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
1501  			     CRYPTO_MAX_ALG_NAME)
1502  			continue;
1503  		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
1504  			continue;
1505  
1506  		/* block ciphers */
1507  		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1508  				      CRYPTO_ALG_ASYNC |
1509  				      CRYPTO_ALG_ALLOCATES_MEMORY |
1510  				      CRYPTO_ALG_NEED_FALLBACK;
1511  		if (!cra->setkey)
1512  			cra->setkey = ablk_setkey;
1513  		if (!cra->encrypt)
1514  			cra->encrypt = ablk_encrypt;
1515  		if (!cra->decrypt)
1516  			cra->decrypt = ablk_decrypt;
1517  		cra->init = init_tfm_ablk;
1518  		cra->exit = exit_tfm_ablk;
1519  
1520  		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1521  		cra->base.cra_module = THIS_MODULE;
1522  		cra->base.cra_alignmask = 3;
1523  		cra->base.cra_priority = 300;
1524  		if (crypto_register_skcipher(cra))
1525  			dev_err(&pdev->dev, "Failed to register '%s'\n",
1526  				cra->base.cra_name);
1527  		else
1528  			ixp4xx_algos[i].registered = 1;
1529  	}
1530  
1531  	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1532  		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
1533  
1534  		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1535  			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
1536  		    CRYPTO_MAX_ALG_NAME)
1537  			continue;
1538  		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
1539  			continue;
1540  
1541  		/* authenc */
1542  		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1543  				      CRYPTO_ALG_ASYNC |
1544  				      CRYPTO_ALG_ALLOCATES_MEMORY;
1545  		cra->setkey = cra->setkey ?: aead_setkey;
1546  		cra->setauthsize = aead_setauthsize;
1547  		cra->encrypt = aead_encrypt;
1548  		cra->decrypt = aead_decrypt;
1549  		cra->init = init_tfm_aead;
1550  		cra->exit = exit_tfm_aead;
1551  
1552  		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1553  		cra->base.cra_module = THIS_MODULE;
1554  		cra->base.cra_alignmask = 3;
1555  		cra->base.cra_priority = 300;
1556  
1557  		if (crypto_register_aead(cra))
1558  			dev_err(&pdev->dev, "Failed to register '%s'\n",
1559  				cra->base.cra_driver_name);
1560  		else
1561  			ixp4xx_aeads[i].registered = 1;
1562  	}
1563  	return 0;
1564  }
1565  
1566  static int ixp_crypto_remove(struct platform_device *pdev)
1567  {
1568  	int num = ARRAY_SIZE(ixp4xx_algos);
1569  	int i;
1570  
1571  	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1572  		if (ixp4xx_aeads[i].registered)
1573  			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
1574  	}
1575  
1576  	for (i = 0; i < num; i++) {
1577  		if (ixp4xx_algos[i].registered)
1578  			crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
1579  	}
1580  	release_ixp_crypto(&pdev->dev);
1581  
1582  	return 0;
1583  }
1584  static const struct of_device_id ixp4xx_crypto_of_match[] = {
1585  	{
1586  		.compatible = "intel,ixp4xx-crypto",
1587  	},
1588  	{},
1589  };
1590  
1591  static struct platform_driver ixp_crypto_driver = {
1592  	.probe = ixp_crypto_probe,
1593  	.remove = ixp_crypto_remove,
1594  	.driver = {
1595  		.name = "ixp4xx_crypto",
1596  		.of_match_table = ixp4xx_crypto_of_match,
1597  	},
1598  };
1599  module_platform_driver(ixp_crypto_driver);
1600  
1601  MODULE_LICENSE("GPL");
1602  MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1603  MODULE_DESCRIPTION("IXP4xx hardware crypto");
1604  
1605