xref: /openbmc/linux/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c (revision 060f35a317ef09101b128f399dce7ed13d019461)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Intel IXP4xx NPE-C crypto driver
4   *
5   * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
6   */
7  
8  #include <linux/platform_device.h>
9  #include <linux/dma-mapping.h>
10  #include <linux/dmapool.h>
11  #include <linux/crypto.h>
12  #include <linux/kernel.h>
13  #include <linux/rtnetlink.h>
14  #include <linux/interrupt.h>
15  #include <linux/spinlock.h>
16  #include <linux/gfp.h>
17  #include <linux/module.h>
18  #include <linux/of.h>
19  
20  #include <crypto/ctr.h>
21  #include <crypto/internal/des.h>
22  #include <crypto/aes.h>
23  #include <crypto/hmac.h>
24  #include <crypto/sha1.h>
25  #include <crypto/algapi.h>
26  #include <crypto/internal/aead.h>
27  #include <crypto/internal/skcipher.h>
28  #include <crypto/authenc.h>
29  #include <crypto/scatterwalk.h>
30  
31  #include <linux/soc/ixp4xx/npe.h>
32  #include <linux/soc/ixp4xx/qmgr.h>
33  
34  /* Transitional includes, delete this after v5.14-rc1 */
35  #include <linux/soc/ixp4xx/cpu.h>
36  
37  #define MAX_KEYLEN 32
38  
39  /* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
40  #define NPE_CTX_LEN 80
41  #define AES_BLOCK128 16
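/*
 * Worked example of the sizing above: a SHA1 HMAC context takes
 * cfgword + 2 * SHA1_DIGEST_SIZE = 4 + 2 * 20 = 44 bytes and an AES-256
 * cipher context takes keylen + cfgword = 32 + 4 = 36 bytes; for authenc
 * both are packed into one per-direction context, 36 + 44 = 80, which is
 * presumably how NPE_CTX_LEN was chosen.
 */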
42  
43  #define NPE_OP_HASH_VERIFY   0x01
44  #define NPE_OP_CCM_ENABLE    0x04
45  #define NPE_OP_CRYPT_ENABLE  0x08
46  #define NPE_OP_HASH_ENABLE   0x10
47  #define NPE_OP_NOT_IN_PLACE  0x20
48  #define NPE_OP_HMAC_DISABLE  0x40
49  #define NPE_OP_CRYPT_ENCRYPT 0x80
50  
51  #define NPE_OP_CCM_GEN_MIC   0xcc
52  #define NPE_OP_HASH_GEN_ICV  0x50
53  #define NPE_OP_ENC_GEN_KEY   0xc9
54  
55  #define MOD_ECB     0x0000
56  #define MOD_CTR     0x1000
57  #define MOD_CBC_ENC 0x2000
58  #define MOD_CBC_DEC 0x3000
59  #define MOD_CCM_ENC 0x4000
60  #define MOD_CCM_DEC 0x5000
61  
62  #define KEYLEN_128  4
63  #define KEYLEN_192  6
64  #define KEYLEN_256  8
65  
66  #define CIPH_DECR   0x0000
67  #define CIPH_ENCR   0x0400
68  
69  #define MOD_DES     0x0000
70  #define MOD_TDEA2   0x0100
71  #define MOD_3DES    0x0200
72  #define MOD_AES     0x0800
73  #define MOD_AES128  (0x0800 | KEYLEN_128)
74  #define MOD_AES192  (0x0900 | KEYLEN_192)
75  #define MOD_AES256  (0x0a00 | KEYLEN_256)
76  
77  #define MAX_IVLEN   16
78  #define NPE_QLEN    16
79  /* Extra space for key-setup ("register") descriptors used when the
80   * first NPE_QLEN crypt_ctl entries are busy */
81  #define NPE_QLEN_TOTAL 64
82  
83  #define CTL_FLAG_UNUSED		0x0000
84  #define CTL_FLAG_USED		0x1000
85  #define CTL_FLAG_PERFORM_ABLK	0x0001
86  #define CTL_FLAG_GEN_ICV	0x0002
87  #define CTL_FLAG_GEN_REVAES	0x0004
88  #define CTL_FLAG_PERFORM_AEAD	0x0008
89  #define CTL_FLAG_MASK		0x000f
90  
91  #define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
92  
93  #define MD5_DIGEST_SIZE   16
94  
95  struct buffer_desc {
96  	u32 phys_next;
97  #ifdef __ARMEB__
98  	u16 buf_len;
99  	u16 pkt_len;
100  #else
101  	u16 pkt_len;
102  	u16 buf_len;
103  #endif
104  	dma_addr_t phys_addr;
105  	u32 __reserved[4];
106  	struct buffer_desc *next;
107  	enum dma_data_direction dir;
108  };
109  
110  struct crypt_ctl {
111  #ifdef __ARMEB__
112  	u8 mode;		/* NPE_OP_*  operation mode */
113  	u8 init_len;
114  	u16 reserved;
115  #else
116  	u16 reserved;
117  	u8 init_len;
118  	u8 mode;		/* NPE_OP_*  operation mode */
119  #endif
120  	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
121  	u32 icv_rev_aes;	/* icv or rev aes */
122  	u32 src_buf;
123  	u32 dst_buf;
124  #ifdef __ARMEB__
125  	u16 auth_offs;		/* Authentication start offset */
126  	u16 auth_len;		/* Authentication data length */
127  	u16 crypt_offs;		/* Cryption start offset */
128  	u16 crypt_len;		/* Cryption data length */
129  #else
130  	u16 auth_len;		/* Authentication data length */
131  	u16 auth_offs;		/* Authentication start offset */
132  	u16 crypt_len;		/* Cryption data length */
133  	u16 crypt_offs;		/* Cryption start offset */
134  #endif
135  	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
136  	u32 crypto_ctx;		/* NPE Crypto Param structure address */
137  
138  	/* Used by Host: 4*4 bytes */
139  	unsigned int ctl_flags;
140  	union {
141  		struct skcipher_request *ablk_req;
142  		struct aead_request *aead_req;
143  		struct crypto_tfm *tfm;
144  	} data;
145  	struct buffer_desc *regist_buf;
146  	u8 *regist_ptr;
147  };
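/*
 * Layout note (inferred from the #ifdef __ARMEB__ blocks above): the NPE
 * consumes these descriptors as big-endian 32-bit words, so on a
 * little-endian host the sub-word u8/u16 members are declared in swapped
 * order to land at the byte offsets the NPE expects.
 */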
148  
149  struct ablk_ctx {
150  	struct buffer_desc *src;
151  	struct buffer_desc *dst;
152  	u8 iv[MAX_IVLEN];
153  	bool encrypt;
154  	struct skcipher_request fallback_req;   // keep at the end
155  };
156  
157  struct aead_ctx {
158  	struct buffer_desc *src;
159  	struct buffer_desc *dst;
160  	struct scatterlist ivlist;
161  	/* used when the hmac is not on one sg entry */
162  	u8 *hmac_virt;
163  	int encrypt;
164  };
165  
166  struct ix_hash_algo {
167  	u32 cfgword;
168  	unsigned char *icv;
169  };
170  
171  struct ix_sa_dir {
172  	unsigned char *npe_ctx;
173  	dma_addr_t npe_ctx_phys;
174  	int npe_ctx_idx;
175  	u8 npe_mode;
176  };
177  
178  struct ixp_ctx {
179  	struct ix_sa_dir encrypt;
180  	struct ix_sa_dir decrypt;
181  	int authkey_len;
182  	u8 authkey[MAX_KEYLEN];
183  	int enckey_len;
184  	u8 enckey[MAX_KEYLEN];
185  	u8 salt[MAX_IVLEN];
186  	u8 nonce[CTR_RFC3686_NONCE_SIZE];
187  	unsigned int salted;
188  	atomic_t configuring;
189  	struct completion completion;
190  	struct crypto_skcipher *fallback_tfm;
191  };
192  
193  struct ixp_alg {
194  	struct skcipher_alg crypto;
195  	const struct ix_hash_algo *hash;
196  	u32 cfg_enc;
197  	u32 cfg_dec;
198  
199  	int registered;
200  };
201  
202  struct ixp_aead_alg {
203  	struct aead_alg crypto;
204  	const struct ix_hash_algo *hash;
205  	u32 cfg_enc;
206  	u32 cfg_dec;
207  
208  	int registered;
209  };
210  
211  static const struct ix_hash_algo hash_alg_md5 = {
212  	.cfgword	= 0xAA010004,
213  	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
214  			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
215  };
216  
217  static const struct ix_hash_algo hash_alg_sha1 = {
218  	.cfgword	= 0x00000005,
219  	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
220  			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
221  };
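/*
 * The cleartext ICVs above are simply the standard initial chaining
 * values of MD5 (A..D, stored little-endian) and SHA-1 (H0..H4, stored
 * big-endian); they seed the digest state in the NPE context before the
 * HMAC pads are hashed in.
 */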
222  
223  static struct npe *npe_c;
224  
225  static unsigned int send_qid;
226  static unsigned int recv_qid;
227  static struct dma_pool *buffer_pool;
228  static struct dma_pool *ctx_pool;
229  
230  static struct crypt_ctl *crypt_virt;
231  static dma_addr_t crypt_phys;
232  
233  static int support_aes = 1;
234  
235  static struct platform_device *pdev;
236  
237  static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
238  {
239  	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
240  }
241  
242  static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
243  {
244  	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
245  }
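/*
 * All descriptors live in a single coherent allocation, so translation
 * is plain pointer arithmetic: descriptor i sits at crypt_virt + i and
 * at crypt_phys + i * sizeof(struct crypt_ctl) (64 bytes), e.g. index 2
 * maps to crypt_phys + 128.
 */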
246  
247  static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
248  {
249  	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
250  }
251  
252  static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
253  {
254  	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
255  }
256  
257  static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
258  {
259  	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
260  }
261  
262  static int setup_crypt_desc(void)
263  {
264  	struct device *dev = &pdev->dev;
265  
266  	BUILD_BUG_ON(!(IS_ENABLED(CONFIG_COMPILE_TEST) &&
267  		       IS_ENABLED(CONFIG_64BIT)) &&
268  		     sizeof(struct crypt_ctl) != 64);
269  	crypt_virt = dma_alloc_coherent(dev,
270  					NPE_QLEN * sizeof(struct crypt_ctl),
271  					&crypt_phys, GFP_ATOMIC);
272  	if (!crypt_virt)
273  		return -ENOMEM;
274  	return 0;
275  }
276  
277  static DEFINE_SPINLOCK(desc_lock);
278  static struct crypt_ctl *get_crypt_desc(void)
279  {
280  	int i;
281  	static int idx;
282  	unsigned long flags;
283  
284  	spin_lock_irqsave(&desc_lock, flags);
285  
286  	if (unlikely(!crypt_virt))
287  		setup_crypt_desc();
288  	if (unlikely(!crypt_virt)) {
289  		spin_unlock_irqrestore(&desc_lock, flags);
290  		return NULL;
291  	}
292  	i = idx;
293  	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
294  		if (++idx >= NPE_QLEN)
295  			idx = 0;
296  		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
297  		spin_unlock_irqrestore(&desc_lock, flags);
298  		return crypt_virt + i;
299  	} else {
300  		spin_unlock_irqrestore(&desc_lock, flags);
301  		return NULL;
302  	}
303  }
304  
305  static DEFINE_SPINLOCK(emerg_lock);
306  static struct crypt_ctl *get_crypt_desc_emerg(void)
307  {
308  	int i;
309  	static int idx = NPE_QLEN;
310  	struct crypt_ctl *desc;
311  	unsigned long flags;
312  
313  	desc = get_crypt_desc();
314  	if (desc)
315  		return desc;
316  	if (unlikely(!crypt_virt))
317  		return NULL;
318  
319  	spin_lock_irqsave(&emerg_lock, flags);
320  	i = idx;
321  	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
322  		if (++idx >= NPE_QLEN_TOTAL)
323  			idx = NPE_QLEN;
324  		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
325  		spin_unlock_irqrestore(&emerg_lock, flags);
326  		return crypt_virt + i;
327  	} else {
328  		spin_unlock_irqrestore(&emerg_lock, flags);
329  		return NULL;
330  	}
331  }
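/*
 * The descriptor array has two regions: indices 0..NPE_QLEN-1 serve
 * normal requests via get_crypt_desc(), while NPE_QLEN..NPE_QLEN_TOTAL-1
 * are reserved for get_crypt_desc_emerg(), which the key-setup paths
 * (register_chain_var(), gen_rev_aes_key()) fall back to when the first
 * NPE_QLEN entries are busy.
 */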
332  
333  static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
334  			   dma_addr_t phys)
335  {
336  	while (buf) {
337  		struct buffer_desc *buf1;
338  		u32 phys1;
339  
340  		buf1 = buf->next;
341  		phys1 = buf->phys_next;
342  		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
343  		dma_pool_free(buffer_pool, buf, phys);
344  		buf = buf1;
345  		phys = phys1;
346  	}
347  }
348  
349  static struct tasklet_struct crypto_done_tasklet;
350  
351  static void finish_scattered_hmac(struct crypt_ctl *crypt)
352  {
353  	struct aead_request *req = crypt->data.aead_req;
354  	struct aead_ctx *req_ctx = aead_request_ctx(req);
355  	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
356  	int authsize = crypto_aead_authsize(tfm);
357  	int decryptlen = req->assoclen + req->cryptlen - authsize;
358  
359  	if (req_ctx->encrypt) {
360  		scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
361  					 decryptlen, authsize, 1);
362  	}
363  	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
364  }
365  
366  static void one_packet(dma_addr_t phys)
367  {
368  	struct device *dev = &pdev->dev;
369  	struct crypt_ctl *crypt;
370  	struct ixp_ctx *ctx;
371  	int failed;
372  
373  	failed = phys & 0x1 ? -EBADMSG : 0;
374  	phys &= ~0x3;
375  	crypt = crypt_phys2virt(phys);
376  
377  	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
378  	case CTL_FLAG_PERFORM_AEAD: {
379  		struct aead_request *req = crypt->data.aead_req;
380  		struct aead_ctx *req_ctx = aead_request_ctx(req);
381  
382  		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
383  		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
384  		if (req_ctx->hmac_virt)
385  			finish_scattered_hmac(crypt);
386  
387  		aead_request_complete(req, failed);
388  		break;
389  	}
390  	case CTL_FLAG_PERFORM_ABLK: {
391  		struct skcipher_request *req = crypt->data.ablk_req;
392  		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
393  		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
394  		unsigned int ivsize = crypto_skcipher_ivsize(tfm);
395  		unsigned int offset;
396  
397  		if (ivsize > 0) {
398  			offset = req->cryptlen - ivsize;
399  			if (req_ctx->encrypt) {
400  				scatterwalk_map_and_copy(req->iv, req->dst,
401  							 offset, ivsize, 0);
402  			} else {
403  				memcpy(req->iv, req_ctx->iv, ivsize);
404  				memzero_explicit(req_ctx->iv, ivsize);
405  			}
406  		}
407  
408  		if (req_ctx->dst)
409  			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
410  
411  		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
412  		skcipher_request_complete(req, failed);
413  		break;
414  	}
415  	case CTL_FLAG_GEN_ICV:
416  		ctx = crypto_tfm_ctx(crypt->data.tfm);
417  		dma_pool_free(ctx_pool, crypt->regist_ptr,
418  			      crypt->regist_buf->phys_addr);
419  		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
420  		if (atomic_dec_and_test(&ctx->configuring))
421  			complete(&ctx->completion);
422  		break;
423  	case CTL_FLAG_GEN_REVAES:
424  		ctx = crypto_tfm_ctx(crypt->data.tfm);
425  		*(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
426  		if (atomic_dec_and_test(&ctx->configuring))
427  			complete(&ctx->completion);
428  		break;
429  	default:
430  		BUG();
431  	}
432  	crypt->ctl_flags = CTL_FLAG_UNUSED;
433  }
434  
435  static void irqhandler(void *_unused)
436  {
437  	tasklet_schedule(&crypto_done_tasklet);
438  }
439  
440  static void crypto_done_action(unsigned long arg)
441  {
442  	int i;
443  
444  	for (i = 0; i < 4; i++) {
445  		dma_addr_t phys = qmgr_get_entry(recv_qid);
446  		if (!phys)
447  			return;
448  		one_packet(phys);
449  	}
450  	tasklet_schedule(&crypto_done_tasklet);
451  }
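/*
 * The tasklet drains at most four completions per run and then
 * reschedules itself so a long completion burst cannot monopolise the
 * softirq; qmgr_get_entry() returning 0 means the receive queue is
 * empty.
 */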
452  
453  static int init_ixp_crypto(struct device *dev)
454  {
455  	struct device_node *np = dev->of_node;
456  	u32 msg[2] = { 0, 0 };
457  	int ret = -ENODEV;
458  	u32 npe_id;
459  
460  	dev_info(dev, "probing...\n");
461  
462  	/* Locate the NPE and queue manager to use from device tree */
463  	if (IS_ENABLED(CONFIG_OF) && np) {
464  		struct of_phandle_args queue_spec;
465  		struct of_phandle_args npe_spec;
466  
467  		ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
468  						       1, 0, &npe_spec);
469  		if (ret) {
470  			dev_err(dev, "no NPE engine specified\n");
471  			return -ENODEV;
472  		}
473  		npe_id = npe_spec.args[0];
474  		of_node_put(npe_spec.np);
475  
476  		ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
477  						       &queue_spec);
478  		if (ret) {
479  			dev_err(dev, "no rx queue phandle\n");
480  			return -ENODEV;
481  		}
482  		recv_qid = queue_spec.args[0];
483  		of_node_put(queue_spec.np);
484  
485  		ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
486  						       &queue_spec);
487  		if (ret) {
488  			dev_err(dev, "no txready queue phandle\n");
489  			return -ENODEV;
490  		}
491  		send_qid = queue_spec.args[0];
492  		of_node_put(queue_spec.np);
493  	} else {
494  		/*
495  		 * Hardcoded engine when using platform data, this goes away
496  		 * when we switch to using DT only.
497  		 */
498  		npe_id = 2;
499  		send_qid = 29;
500  		recv_qid = 30;
501  	}
502  
503  	npe_c = npe_request(npe_id);
504  	if (!npe_c)
505  		return ret;
506  
507  	if (!npe_running(npe_c)) {
508  		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
509  		if (ret)
510  			goto npe_release;
511  		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
512  			goto npe_error;
513  	} else {
514  		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
515  			goto npe_error;
516  
517  		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
518  			goto npe_error;
519  	}
520  
521  	switch ((msg[1] >> 16) & 0xff) {
522  	case 3:
523  		dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
524  		support_aes = 0;
525  		break;
526  	case 4:
527  	case 5:
528  		support_aes = 1;
529  		break;
530  	default:
531  		dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
532  		ret = -ENODEV;
533  		goto npe_release;
534  	}
535  	/* buffer_pool is sometimes also used to store the hmac,
536  	 * so ensure it is large enough
537  	 */
538  	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
539  	buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
540  				      32, 0);
541  	ret = -ENOMEM;
542  	if (!buffer_pool)
543  		goto err;
544  
545  	ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
546  	if (!ctx_pool)
547  		goto err;
548  
549  	ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
550  				 "ixp_crypto:out", NULL);
551  	if (ret)
552  		goto err;
553  	ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
554  				 "ixp_crypto:in", NULL);
555  	if (ret) {
556  		qmgr_release_queue(send_qid);
557  		goto err;
558  	}
559  	qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
560  	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
561  
562  	qmgr_enable_irq(recv_qid);
563  	return 0;
564  
565  npe_error:
566  	dev_err(dev, "%s not responding\n", npe_name(npe_c));
567  	ret = -EIO;
568  err:
569  	dma_pool_destroy(ctx_pool);
570  	dma_pool_destroy(buffer_pool);
571  npe_release:
572  	npe_release(npe_c);
573  	return ret;
574  }
575  
576  static void release_ixp_crypto(struct device *dev)
577  {
578  	qmgr_disable_irq(recv_qid);
579  	tasklet_kill(&crypto_done_tasklet);
580  
581  	qmgr_release_queue(send_qid);
582  	qmgr_release_queue(recv_qid);
583  
584  	dma_pool_destroy(ctx_pool);
585  	dma_pool_destroy(buffer_pool);
586  
587  	npe_release(npe_c);
588  
589  	if (crypt_virt)
590  		dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
591  				  crypt_virt, crypt_phys);
592  }
593  
594  static void reset_sa_dir(struct ix_sa_dir *dir)
595  {
596  	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
597  	dir->npe_ctx_idx = 0;
598  	dir->npe_mode = 0;
599  }
600  
601  static int init_sa_dir(struct ix_sa_dir *dir)
602  {
603  	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
604  	if (!dir->npe_ctx)
605  		return -ENOMEM;
606  
607  	reset_sa_dir(dir);
608  	return 0;
609  }
610  
611  static void free_sa_dir(struct ix_sa_dir *dir)
612  {
613  	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
614  	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
615  }
616  
617  static int init_tfm(struct crypto_tfm *tfm)
618  {
619  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
620  	int ret;
621  
622  	atomic_set(&ctx->configuring, 0);
623  	ret = init_sa_dir(&ctx->encrypt);
624  	if (ret)
625  		return ret;
626  	ret = init_sa_dir(&ctx->decrypt);
627  	if (ret)
628  		free_sa_dir(&ctx->encrypt);
629  
630  	return ret;
631  }
632  
633  static int init_tfm_ablk(struct crypto_skcipher *tfm)
634  {
635  	struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
636  	struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
637  	const char *name = crypto_tfm_alg_name(ctfm);
638  
639  	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
640  	if (IS_ERR(ctx->fallback_tfm)) {
641  		pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
642  			name, PTR_ERR(ctx->fallback_tfm));
643  		return PTR_ERR(ctx->fallback_tfm);
644  	}
645  
646  	pr_info("Fallback for %s is %s\n",
647  		 crypto_tfm_alg_driver_name(&tfm->base),
648  		 crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
649  		 );
650  
651  	crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
652  	return init_tfm(crypto_skcipher_tfm(tfm));
653  }
654  
655  static int init_tfm_aead(struct crypto_aead *tfm)
656  {
657  	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
658  	return init_tfm(crypto_aead_tfm(tfm));
659  }
660  
661  static void exit_tfm(struct crypto_tfm *tfm)
662  {
663  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
664  
665  	free_sa_dir(&ctx->encrypt);
666  	free_sa_dir(&ctx->decrypt);
667  }
668  
669  static void exit_tfm_ablk(struct crypto_skcipher *tfm)
670  {
671  	struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
672  	struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
673  
674  	crypto_free_skcipher(ctx->fallback_tfm);
675  	exit_tfm(crypto_skcipher_tfm(tfm));
676  }
677  
678  static void exit_tfm_aead(struct crypto_aead *tfm)
679  {
680  	exit_tfm(crypto_aead_tfm(tfm));
681  }
682  
683  static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
684  			      int init_len, u32 ctx_addr, const u8 *key,
685  			      int key_len)
686  {
687  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
688  	struct crypt_ctl *crypt;
689  	struct buffer_desc *buf;
690  	int i;
691  	u8 *pad;
692  	dma_addr_t pad_phys, buf_phys;
693  
694  	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
695  	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
696  	if (!pad)
697  		return -ENOMEM;
698  	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
699  	if (!buf) {
700  		dma_pool_free(ctx_pool, pad, pad_phys);
701  		return -ENOMEM;
702  	}
703  	crypt = get_crypt_desc_emerg();
704  	if (!crypt) {
705  		dma_pool_free(ctx_pool, pad, pad_phys);
706  		dma_pool_free(buffer_pool, buf, buf_phys);
707  		return -EAGAIN;
708  	}
709  
710  	memcpy(pad, key, key_len);
711  	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
712  	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
713  		pad[i] ^= xpad;
714  
715  	crypt->data.tfm = tfm;
716  	crypt->regist_ptr = pad;
717  	crypt->regist_buf = buf;
718  
719  	crypt->auth_offs = 0;
720  	crypt->auth_len = HMAC_PAD_BLOCKLEN;
721  	crypt->crypto_ctx = ctx_addr;
722  	crypt->src_buf = buf_phys;
723  	crypt->icv_rev_aes = target;
724  	crypt->mode = NPE_OP_HASH_GEN_ICV;
725  	crypt->init_len = init_len;
726  	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
727  
728  	buf->next = NULL;
729  	buf->buf_len = HMAC_PAD_BLOCKLEN;
730  	buf->pkt_len = 0;
731  	buf->phys_addr = pad_phys;
732  
733  	atomic_inc(&ctx->configuring);
734  	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
735  	BUG_ON(qmgr_stat_overflow(send_qid));
736  	return 0;
737  }
738  
739  static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
740  		      const u8 *key, int key_len, unsigned int digest_len)
741  {
742  	u32 itarget, otarget, npe_ctx_addr;
743  	unsigned char *cinfo;
744  	int init_len, ret = 0;
745  	u32 cfgword;
746  	struct ix_sa_dir *dir;
747  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
748  	const struct ix_hash_algo *algo;
749  
750  	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
751  	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
752  	algo = ix_hash(tfm);
753  
754  	/* write cfg word to cryptinfo */
755  	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
756  #ifndef __ARMEB__
757  	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
758  #endif
759  	*(__be32 *)cinfo = cpu_to_be32(cfgword);
760  	cinfo += sizeof(cfgword);
761  
762  	/* write ICV to cryptinfo */
763  	memcpy(cinfo, algo->icv, digest_len);
764  	cinfo += digest_len;
765  
766  	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
767  				+ sizeof(algo->cfgword);
768  	otarget = itarget + digest_len;
769  	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
770  	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
771  
772  	dir->npe_ctx_idx += init_len;
773  	dir->npe_mode |= NPE_OP_HASH_ENABLE;
774  
775  	if (!encrypt)
776  		dir->npe_mode |= NPE_OP_HASH_VERIFY;
777  
778  	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
779  				 init_len, npe_ctx_addr, key, key_len);
780  	if (ret)
781  		return ret;
782  	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
783  				  init_len, npe_ctx_addr, key, key_len);
784  }
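/*
 * HMAC precomputation sketch (the standard RFC 2104 scheme): the key is
 * zero-padded to HMAC_PAD_BLOCKLEN, XORed with 0x36 (ipad) and 0x5c
 * (opad), and each pad is hashed once by the NPE; the intermediate
 * digests land at itarget/otarget inside the SA context, roughly
 *
 *	inner = H(key ^ ipad)	-> itarget
 *	outer = H(key ^ opad)	-> otarget
 *
 * so per-packet HMACs only continue hashing from those saved states.
 */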
785  
786  static int gen_rev_aes_key(struct crypto_tfm *tfm)
787  {
788  	struct crypt_ctl *crypt;
789  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
790  	struct ix_sa_dir *dir = &ctx->decrypt;
791  
792  	crypt = get_crypt_desc_emerg();
793  	if (!crypt)
794  		return -EAGAIN;
795  
796  	*(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
797  
798  	crypt->data.tfm = tfm;
799  	crypt->crypt_offs = 0;
800  	crypt->crypt_len = AES_BLOCK128;
801  	crypt->src_buf = 0;
802  	crypt->crypto_ctx = dir->npe_ctx_phys;
803  	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
804  	crypt->mode = NPE_OP_ENC_GEN_KEY;
805  	crypt->init_len = dir->npe_ctx_idx;
806  	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
807  
808  	atomic_inc(&ctx->configuring);
809  	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
810  	BUG_ON(qmgr_stat_overflow(send_qid));
811  	return 0;
812  }
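/*
 * AES decryption on the NPE needs the "reverse" (decryption) key
 * schedule. Instead of deriving it on the host, a dummy
 * NPE_OP_ENC_GEN_KEY operation is queued so that the NPE deposits the
 * reverse key material just after the config word (icv_rev_aes points at
 * npe_ctx + sizeof(u32)). The temporarily set CIPH_ENCR bit is cleared
 * again in one_packet() when the CTL_FLAG_GEN_REVAES completion arrives.
 */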
813  
814  static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
815  			int key_len)
816  {
817  	u8 *cinfo;
818  	u32 cipher_cfg;
819  	u32 keylen_cfg = 0;
820  	struct ix_sa_dir *dir;
821  	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
822  	int err;
823  
824  	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
825  	cinfo = dir->npe_ctx;
826  
827  	if (encrypt) {
828  		cipher_cfg = cipher_cfg_enc(tfm);
829  		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
830  	} else {
831  		cipher_cfg = cipher_cfg_dec(tfm);
832  	}
833  	if (cipher_cfg & MOD_AES) {
834  		switch (key_len) {
835  		case 16:
836  			keylen_cfg = MOD_AES128;
837  			break;
838  		case 24:
839  			keylen_cfg = MOD_AES192;
840  			break;
841  		case 32:
842  			keylen_cfg = MOD_AES256;
843  			break;
844  		default:
845  			return -EINVAL;
846  		}
847  		cipher_cfg |= keylen_cfg;
848  	} else {
849  		err = crypto_des_verify_key(tfm, key);
850  		if (err)
851  			return err;
852  	}
853  	/* write cfg word to cryptinfo */
854  	*(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
855  	cinfo += sizeof(cipher_cfg);
856  
857  	/* write cipher key to cryptinfo */
858  	memcpy(cinfo, key, key_len);
859  	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
860  	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
861  		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
862  		key_len = DES3_EDE_KEY_SIZE;
863  	}
864  	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
865  	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
866  	if ((cipher_cfg & MOD_AES) && !encrypt)
867  		return gen_rev_aes_key(tfm);
868  
869  	return 0;
870  }
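/*
 * Worked example of the config word: AES-128 CBC on the encrypt side is
 * CIPH_ENCR | MOD_CBC_ENC | MOD_AES128
 *   = 0x0400 | 0x2000 | (0x0800 | KEYLEN_128)
 *   = 0x2c04,
 * stored big-endian at the start of the per-direction NPE context and
 * followed directly by the cipher key.
 */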
871  
872  static struct buffer_desc *chainup_buffers(struct device *dev,
873  		struct scatterlist *sg,	unsigned int nbytes,
874  		struct buffer_desc *buf, gfp_t flags,
875  		enum dma_data_direction dir)
876  {
877  	for (; nbytes > 0; sg = sg_next(sg)) {
878  		unsigned int len = min(nbytes, sg->length);
879  		struct buffer_desc *next_buf;
880  		dma_addr_t next_buf_phys;
881  		void *ptr;
882  
883  		nbytes -= len;
884  		ptr = sg_virt(sg);
885  		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
886  		if (!next_buf) {
887  			buf = NULL;
888  			break;
889  		}
890  		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
891  		buf->next = next_buf;
892  		buf->phys_next = next_buf_phys;
893  		buf = next_buf;
894  
895  		buf->phys_addr = sg_dma_address(sg);
896  		buf->buf_len = len;
897  		buf->dir = dir;
898  	}
899  	buf->next = NULL;
900  	buf->phys_next = 0;
901  	return buf;
902  }
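/*
 * chainup_buffers() turns a scatterlist into the NPE's singly linked
 * buffer chain. The caller passes a stack-allocated "hook" descriptor
 * that only seeds the chain: the first real, pool-allocated entry is
 * reachable through hook.next / hook.phys_next, and the hook itself is
 * never handed to the hardware.
 */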
903  
904  static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
905  		       unsigned int key_len)
906  {
907  	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
908  	int ret;
909  
910  	init_completion(&ctx->completion);
911  	atomic_inc(&ctx->configuring);
912  
913  	reset_sa_dir(&ctx->encrypt);
914  	reset_sa_dir(&ctx->decrypt);
915  
916  	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
917  	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
918  
919  	ret = setup_cipher(&tfm->base, 0, key, key_len);
920  	if (ret)
921  		goto out;
922  	ret = setup_cipher(&tfm->base, 1, key, key_len);
923  out:
924  	if (!atomic_dec_and_test(&ctx->configuring))
925  		wait_for_completion(&ctx->completion);
926  	if (ret)
927  		return ret;
928  	crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
929  	crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
930  
931  	return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
932  }
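/*
 * Key setup is asynchronous: setup_cipher() may queue reverse-AES-key
 * generation on the NPE, so ctx->configuring counts outstanding NPE
 * operations and the last completion (seen in one_packet()) signals
 * ctx->completion. Requests that arrive while configuring is non-zero
 * are bounced with -EAGAIN by ablk_perform()/aead_perform().
 */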
933  
934  static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
935  			    unsigned int key_len)
936  {
937  	return verify_skcipher_des3_key(tfm, key) ?:
938  	       ablk_setkey(tfm, key, key_len);
939  }
940  
941  static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
942  			       unsigned int key_len)
943  {
944  	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
945  
946  	/* the nonce is stored in the last CTR_RFC3686_NONCE_SIZE bytes of the key */
947  	if (key_len < CTR_RFC3686_NONCE_SIZE)
948  		return -EINVAL;
949  
950  	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
951  	       CTR_RFC3686_NONCE_SIZE);
952  
953  	key_len -= CTR_RFC3686_NONCE_SIZE;
954  	return ablk_setkey(tfm, key, key_len);
955  }
956  
957  static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
958  {
959  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
960  	struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
961  	struct ablk_ctx *rctx = skcipher_request_ctx(areq);
962  	int err;
963  
964  	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
965  	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
966  				      areq->base.complete, areq->base.data);
967  	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
968  				   areq->cryptlen, areq->iv);
969  	if (encrypt)
970  		err = crypto_skcipher_encrypt(&rctx->fallback_req);
971  	else
972  		err = crypto_skcipher_decrypt(&rctx->fallback_req);
973  	return err;
974  }
975  
976  static int ablk_perform(struct skcipher_request *req, int encrypt)
977  {
978  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
979  	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
980  	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
981  	struct ix_sa_dir *dir;
982  	struct crypt_ctl *crypt;
983  	unsigned int nbytes = req->cryptlen;
984  	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
985  	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
986  	struct buffer_desc src_hook;
987  	struct device *dev = &pdev->dev;
988  	unsigned int offset;
989  	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
990  				GFP_KERNEL : GFP_ATOMIC;
991  
992  	if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
993  		return ixp4xx_cipher_fallback(req, encrypt);
994  
995  	if (qmgr_stat_full(send_qid))
996  		return -EAGAIN;
997  	if (atomic_read(&ctx->configuring))
998  		return -EAGAIN;
999  
1000  	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
1001  	req_ctx->encrypt = encrypt;
1002  
1003  	crypt = get_crypt_desc();
1004  	if (!crypt)
1005  		return -ENOMEM;
1006  
1007  	crypt->data.ablk_req = req;
1008  	crypt->crypto_ctx = dir->npe_ctx_phys;
1009  	crypt->mode = dir->npe_mode;
1010  	crypt->init_len = dir->npe_ctx_idx;
1011  
1012  	crypt->crypt_offs = 0;
1013  	crypt->crypt_len = nbytes;
1014  
1015  	BUG_ON(ivsize && !req->iv);
1016  	memcpy(crypt->iv, req->iv, ivsize);
1017  	if (ivsize > 0 && !encrypt) {
1018  		offset = req->cryptlen - ivsize;
1019  		scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
1020  	}
1021  	if (req->src != req->dst) {
1022  		struct buffer_desc dst_hook;
1023  
1024  		crypt->mode |= NPE_OP_NOT_IN_PLACE;
1025  		/* This was never tested by Intel
1026  		 * for more than one dst buffer, I think. */
1027  		req_ctx->dst = NULL;
1028  		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
1029  				     flags, DMA_FROM_DEVICE))
1030  			goto free_buf_dest;
1031  		src_direction = DMA_TO_DEVICE;
1032  		req_ctx->dst = dst_hook.next;
1033  		crypt->dst_buf = dst_hook.phys_next;
1034  	} else {
1035  		req_ctx->dst = NULL;
1036  	}
1037  	req_ctx->src = NULL;
1038  	if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
1039  			     src_direction))
1040  		goto free_buf_src;
1041  
1042  	req_ctx->src = src_hook.next;
1043  	crypt->src_buf = src_hook.phys_next;
1044  	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
1045  	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
1046  	BUG_ON(qmgr_stat_overflow(send_qid));
1047  	return -EINPROGRESS;
1048  
1049  free_buf_src:
1050  	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
1051  free_buf_dest:
1052  	if (req->src != req->dst)
1053  		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
1054  
1055  	crypt->ctl_flags = CTL_FLAG_UNUSED;
1056  	return -ENOMEM;
1057  }
1058  
1059  static int ablk_encrypt(struct skcipher_request *req)
1060  {
1061  	return ablk_perform(req, 1);
1062  }
1063  
1064  static int ablk_decrypt(struct skcipher_request *req)
1065  {
1066  	return ablk_perform(req, 0);
1067  }
1068  
1069  static int ablk_rfc3686_crypt(struct skcipher_request *req)
1070  {
1071  	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1072  	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
1073  	u8 iv[CTR_RFC3686_BLOCK_SIZE];
1074  	u8 *info = req->iv;
1075  	int ret;
1076  
1077  	/* set up counter block */
1078  	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
1079  	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
1080  
1081  	/* initialize counter portion of counter block */
1082  	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
1083  		cpu_to_be32(1);
1084  
1085  	req->iv = iv;
1086  	ret = ablk_perform(req, 1);
1087  	req->iv = info;
1088  	return ret;
1089  }
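/*
 * RFC 3686 counter block assembled above (16 bytes):
 *
 *	bytes  0..3	nonce (last 4 key bytes, saved at setkey time)
 *	bytes  4..11	per-request IV supplied by the caller
 *	bytes 12..15	block counter, initialised to 1 (big-endian)
 *
 * The temporary block is used as the CTR IV and the caller's IV pointer
 * is restored before returning.
 */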
1090  
1091  static int aead_perform(struct aead_request *req, int encrypt,
1092  			int cryptoffset, int eff_cryptlen, u8 *iv)
1093  {
1094  	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1095  	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1096  	unsigned int ivsize = crypto_aead_ivsize(tfm);
1097  	unsigned int authsize = crypto_aead_authsize(tfm);
1098  	struct ix_sa_dir *dir;
1099  	struct crypt_ctl *crypt;
1100  	unsigned int cryptlen;
1101  	struct buffer_desc *buf, src_hook;
1102  	struct aead_ctx *req_ctx = aead_request_ctx(req);
1103  	struct device *dev = &pdev->dev;
1104  	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1105  				GFP_KERNEL : GFP_ATOMIC;
1106  	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
1107  	unsigned int lastlen;
1108  
1109  	if (qmgr_stat_full(send_qid))
1110  		return -EAGAIN;
1111  	if (atomic_read(&ctx->configuring))
1112  		return -EAGAIN;
1113  
1114  	if (encrypt) {
1115  		dir = &ctx->encrypt;
1116  		cryptlen = req->cryptlen;
1117  	} else {
1118  		dir = &ctx->decrypt;
1119  		/* req->cryptlen includes the authsize when decrypting */
1120  		cryptlen = req->cryptlen - authsize;
1121  		eff_cryptlen -= authsize;
1122  	}
1123  	crypt = get_crypt_desc();
1124  	if (!crypt)
1125  		return -ENOMEM;
1126  
1127  	crypt->data.aead_req = req;
1128  	crypt->crypto_ctx = dir->npe_ctx_phys;
1129  	crypt->mode = dir->npe_mode;
1130  	crypt->init_len = dir->npe_ctx_idx;
1131  
1132  	crypt->crypt_offs = cryptoffset;
1133  	crypt->crypt_len = eff_cryptlen;
1134  
1135  	crypt->auth_offs = 0;
1136  	crypt->auth_len = req->assoclen + cryptlen;
1137  	BUG_ON(ivsize && !req->iv);
1138  	memcpy(crypt->iv, req->iv, ivsize);
1139  
1140  	buf = chainup_buffers(dev, req->src, crypt->auth_len,
1141  			      &src_hook, flags, src_direction);
1142  	req_ctx->src = src_hook.next;
1143  	crypt->src_buf = src_hook.phys_next;
1144  	if (!buf)
1145  		goto free_buf_src;
1146  
1147  	lastlen = buf->buf_len;
1148  	if (lastlen >= authsize)
1149  		crypt->icv_rev_aes = buf->phys_addr +
1150  				     buf->buf_len - authsize;
1151  
1152  	req_ctx->dst = NULL;
1153  
1154  	if (req->src != req->dst) {
1155  		struct buffer_desc dst_hook;
1156  
1157  		crypt->mode |= NPE_OP_NOT_IN_PLACE;
1158  		src_direction = DMA_TO_DEVICE;
1159  
1160  		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
1161  				      &dst_hook, flags, DMA_FROM_DEVICE);
1162  		req_ctx->dst = dst_hook.next;
1163  		crypt->dst_buf = dst_hook.phys_next;
1164  
1165  		if (!buf)
1166  			goto free_buf_dst;
1167  
1168  		if (encrypt) {
1169  			lastlen = buf->buf_len;
1170  			if (lastlen >= authsize)
1171  				crypt->icv_rev_aes = buf->phys_addr +
1172  						     buf->buf_len - authsize;
1173  		}
1174  	}
1175  
1176  	if (unlikely(lastlen < authsize)) {
1177  		dma_addr_t dma;
1178  		/* The hmac bytes are scattered across sg entries,
1179  		 * so copy them into a safe contiguous buffer */
1180  		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags, &dma);
1181  		if (unlikely(!req_ctx->hmac_virt))
1182  			goto free_buf_dst;
1183  		crypt->icv_rev_aes = dma;
1184  		if (!encrypt) {
1185  			scatterwalk_map_and_copy(req_ctx->hmac_virt,
1186  						 req->src, cryptlen, authsize, 0);
1187  		}
1188  		req_ctx->encrypt = encrypt;
1189  	} else {
1190  		req_ctx->hmac_virt = NULL;
1191  	}
1192  
1193  	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1194  	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
1195  	BUG_ON(qmgr_stat_overflow(send_qid));
1196  	return -EINPROGRESS;
1197  
1198  free_buf_dst:
1199  	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
1200  free_buf_src:
1201  	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
1202  	crypt->ctl_flags = CTL_FLAG_UNUSED;
1203  	return -ENOMEM;
1204  }
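/*
 * ICV placement: when the ICV fits inside the last buffer of the chain,
 * icv_rev_aes points straight into that buffer and the NPE reads or
 * writes it in place. If it would straddle scatterlist entries
 * (lastlen < authsize), a bounce buffer from buffer_pool is used
 * instead: pre-filled from req->src for decryption, and copied out to
 * req->dst by finish_scattered_hmac() after encryption.
 */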
1205  
1206  static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1207  {
1208  	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1209  	unsigned int digest_len = crypto_aead_maxauthsize(tfm);
1210  	int ret;
1211  
1212  	if (!ctx->enckey_len && !ctx->authkey_len)
1213  		return 0;
1214  	init_completion(&ctx->completion);
1215  	atomic_inc(&ctx->configuring);
1216  
1217  	reset_sa_dir(&ctx->encrypt);
1218  	reset_sa_dir(&ctx->decrypt);
1219  
1220  	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1221  	if (ret)
1222  		goto out;
1223  	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1224  	if (ret)
1225  		goto out;
1226  	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1227  			 ctx->authkey_len, digest_len);
1228  	if (ret)
1229  		goto out;
1230  	ret = setup_auth(&tfm->base, 1, authsize,  ctx->authkey,
1231  			 ctx->authkey_len, digest_len);
1232  out:
1233  	if (!atomic_dec_and_test(&ctx->configuring))
1234  		wait_for_completion(&ctx->completion);
1235  	return ret;
1236  }
1237  
1238  static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1239  {
1240  	int max = crypto_aead_maxauthsize(tfm) >> 2;
1241  
1242  	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
1243  		return -EINVAL;
1244  	return aead_setup(tfm, authsize);
1245  }
1246  
1247  static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1248  		       unsigned int keylen)
1249  {
1250  	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1251  	struct crypto_authenc_keys keys;
1252  
1253  	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1254  		goto badkey;
1255  
1256  	if (keys.authkeylen > sizeof(ctx->authkey))
1257  		goto badkey;
1258  
1259  	if (keys.enckeylen > sizeof(ctx->enckey))
1260  		goto badkey;
1261  
1262  	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1263  	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1264  	ctx->authkey_len = keys.authkeylen;
1265  	ctx->enckey_len = keys.enckeylen;
1266  
1267  	memzero_explicit(&keys, sizeof(keys));
1268  	return aead_setup(tfm, crypto_aead_authsize(tfm));
1269  badkey:
1270  	memzero_explicit(&keys, sizeof(keys));
1271  	return -EINVAL;
1272  }
1273  
1274  static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1275  			    unsigned int keylen)
1276  {
1277  	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1278  	struct crypto_authenc_keys keys;
1279  	int err;
1280  
1281  	err = crypto_authenc_extractkeys(&keys, key, keylen);
1282  	if (unlikely(err))
1283  		goto badkey;
1284  
1285  	err = -EINVAL;
1286  	if (keys.authkeylen > sizeof(ctx->authkey))
1287  		goto badkey;
1288  
1289  	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
1290  	if (err)
1291  		goto badkey;
1292  
1293  	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1294  	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1295  	ctx->authkey_len = keys.authkeylen;
1296  	ctx->enckey_len = keys.enckeylen;
1297  
1298  	memzero_explicit(&keys, sizeof(keys));
1299  	return aead_setup(tfm, crypto_aead_authsize(tfm));
1300  badkey:
1301  	memzero_explicit(&keys, sizeof(keys));
1302  	return err;
1303  }
1304  
1305  static int aead_encrypt(struct aead_request *req)
1306  {
1307  	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
1308  }
1309  
1310  static int aead_decrypt(struct aead_request *req)
1311  {
1312  	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
1313  }
1314  
1315  static struct ixp_alg ixp4xx_algos[] = {
1316  {
1317  	.crypto	= {
1318  		.base.cra_name		= "cbc(des)",
1319  		.base.cra_blocksize	= DES_BLOCK_SIZE,
1320  
1321  		.min_keysize		= DES_KEY_SIZE,
1322  		.max_keysize		= DES_KEY_SIZE,
1323  		.ivsize			= DES_BLOCK_SIZE,
1324  	},
1325  	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1326  	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1327  
1328  }, {
1329  	.crypto	= {
1330  		.base.cra_name		= "ecb(des)",
1331  		.base.cra_blocksize	= DES_BLOCK_SIZE,
1332  		.min_keysize		= DES_KEY_SIZE,
1333  		.max_keysize		= DES_KEY_SIZE,
1334  	},
1335  	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1336  	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1337  }, {
1338  	.crypto	= {
1339  		.base.cra_name		= "cbc(des3_ede)",
1340  		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1341  
1342  		.min_keysize		= DES3_EDE_KEY_SIZE,
1343  		.max_keysize		= DES3_EDE_KEY_SIZE,
1344  		.ivsize			= DES3_EDE_BLOCK_SIZE,
1345  		.setkey			= ablk_des3_setkey,
1346  	},
1347  	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1348  	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1349  }, {
1350  	.crypto	= {
1351  		.base.cra_name		= "ecb(des3_ede)",
1352  		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1353  
1354  		.min_keysize		= DES3_EDE_KEY_SIZE,
1355  		.max_keysize		= DES3_EDE_KEY_SIZE,
1356  		.setkey			= ablk_des3_setkey,
1357  	},
1358  	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1359  	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1360  }, {
1361  	.crypto	= {
1362  		.base.cra_name		= "cbc(aes)",
1363  		.base.cra_blocksize	= AES_BLOCK_SIZE,
1364  
1365  		.min_keysize		= AES_MIN_KEY_SIZE,
1366  		.max_keysize		= AES_MAX_KEY_SIZE,
1367  		.ivsize			= AES_BLOCK_SIZE,
1368  	},
1369  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1370  	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1371  }, {
1372  	.crypto	= {
1373  		.base.cra_name		= "ecb(aes)",
1374  		.base.cra_blocksize	= AES_BLOCK_SIZE,
1375  
1376  		.min_keysize		= AES_MIN_KEY_SIZE,
1377  		.max_keysize		= AES_MAX_KEY_SIZE,
1378  	},
1379  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1380  	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1381  }, {
1382  	.crypto	= {
1383  		.base.cra_name		= "ctr(aes)",
1384  		.base.cra_blocksize	= 1,
1385  
1386  		.min_keysize		= AES_MIN_KEY_SIZE,
1387  		.max_keysize		= AES_MAX_KEY_SIZE,
1388  		.ivsize			= AES_BLOCK_SIZE,
1389  	},
1390  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1391  	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1392  }, {
1393  	.crypto	= {
1394  		.base.cra_name		= "rfc3686(ctr(aes))",
1395  		.base.cra_blocksize	= 1,
1396  
1397  		.min_keysize		= AES_MIN_KEY_SIZE,
1398  		.max_keysize		= AES_MAX_KEY_SIZE,
1399  		.ivsize			= AES_BLOCK_SIZE,
1400  		.setkey			= ablk_rfc3686_setkey,
1401  		.encrypt		= ablk_rfc3686_crypt,
1402  		.decrypt		= ablk_rfc3686_crypt,
1403  	},
1404  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1405  	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1406  } };
1407  
1408  static struct ixp_aead_alg ixp4xx_aeads[] = {
1409  {
1410  	.crypto	= {
1411  		.base = {
1412  			.cra_name	= "authenc(hmac(md5),cbc(des))",
1413  			.cra_blocksize	= DES_BLOCK_SIZE,
1414  		},
1415  		.ivsize		= DES_BLOCK_SIZE,
1416  		.maxauthsize	= MD5_DIGEST_SIZE,
1417  	},
1418  	.hash = &hash_alg_md5,
1419  	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1420  	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1421  }, {
1422  	.crypto	= {
1423  		.base = {
1424  			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
1425  			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1426  		},
1427  		.ivsize		= DES3_EDE_BLOCK_SIZE,
1428  		.maxauthsize	= MD5_DIGEST_SIZE,
1429  		.setkey		= des3_aead_setkey,
1430  	},
1431  	.hash = &hash_alg_md5,
1432  	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1433  	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1434  }, {
1435  	.crypto	= {
1436  		.base = {
1437  			.cra_name	= "authenc(hmac(sha1),cbc(des))",
1438  			.cra_blocksize	= DES_BLOCK_SIZE,
1439  		},
1440  			.ivsize		= DES_BLOCK_SIZE,
1441  			.maxauthsize	= SHA1_DIGEST_SIZE,
1442  	},
1443  	.hash = &hash_alg_sha1,
1444  	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1445  	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1446  }, {
1447  	.crypto	= {
1448  		.base = {
1449  			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
1450  			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1451  		},
1452  		.ivsize		= DES3_EDE_BLOCK_SIZE,
1453  		.maxauthsize	= SHA1_DIGEST_SIZE,
1454  		.setkey		= des3_aead_setkey,
1455  	},
1456  	.hash = &hash_alg_sha1,
1457  	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1458  	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1459  }, {
1460  	.crypto	= {
1461  		.base = {
1462  			.cra_name	= "authenc(hmac(md5),cbc(aes))",
1463  			.cra_blocksize	= AES_BLOCK_SIZE,
1464  		},
1465  		.ivsize		= AES_BLOCK_SIZE,
1466  		.maxauthsize	= MD5_DIGEST_SIZE,
1467  	},
1468  	.hash = &hash_alg_md5,
1469  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1470  	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1471  }, {
1472  	.crypto	= {
1473  		.base = {
1474  			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
1475  			.cra_blocksize	= AES_BLOCK_SIZE,
1476  		},
1477  		.ivsize		= AES_BLOCK_SIZE,
1478  		.maxauthsize	= SHA1_DIGEST_SIZE,
1479  	},
1480  	.hash = &hash_alg_sha1,
1481  	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1482  	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1483  } };
1484  
1485  #define IXP_POSTFIX "-ixp4xx"
1486  
1487  static int ixp_crypto_probe(struct platform_device *_pdev)
1488  {
1489  	struct device *dev = &_pdev->dev;
1490  	int num = ARRAY_SIZE(ixp4xx_algos);
1491  	int i, err;
1492  
1493  	pdev = _pdev;
1494  
1495  	err = init_ixp_crypto(dev);
1496  	if (err)
1497  		return err;
1498  
1499  	for (i = 0; i < num; i++) {
1500  		struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
1501  
1502  		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1503  			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
1504  			     CRYPTO_MAX_ALG_NAME)
1505  			continue;
1506  		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
1507  			continue;
1508  
1509  		/* block ciphers */
1510  		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1511  				      CRYPTO_ALG_ASYNC |
1512  				      CRYPTO_ALG_ALLOCATES_MEMORY |
1513  				      CRYPTO_ALG_NEED_FALLBACK;
1514  		if (!cra->setkey)
1515  			cra->setkey = ablk_setkey;
1516  		if (!cra->encrypt)
1517  			cra->encrypt = ablk_encrypt;
1518  		if (!cra->decrypt)
1519  			cra->decrypt = ablk_decrypt;
1520  		cra->init = init_tfm_ablk;
1521  		cra->exit = exit_tfm_ablk;
1522  
1523  		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1524  		cra->base.cra_module = THIS_MODULE;
1525  		cra->base.cra_alignmask = 3;
1526  		cra->base.cra_priority = 300;
1527  		if (crypto_register_skcipher(cra))
1528  			dev_err(&pdev->dev, "Failed to register '%s'\n",
1529  				cra->base.cra_name);
1530  		else
1531  			ixp4xx_algos[i].registered = 1;
1532  	}
1533  
1534  	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1535  		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
1536  
1537  		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1538  			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
1539  		    CRYPTO_MAX_ALG_NAME)
1540  			continue;
1541  		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
1542  			continue;
1543  
1544  		/* authenc */
1545  		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1546  				      CRYPTO_ALG_ASYNC |
1547  				      CRYPTO_ALG_ALLOCATES_MEMORY;
1548  		cra->setkey = cra->setkey ?: aead_setkey;
1549  		cra->setauthsize = aead_setauthsize;
1550  		cra->encrypt = aead_encrypt;
1551  		cra->decrypt = aead_decrypt;
1552  		cra->init = init_tfm_aead;
1553  		cra->exit = exit_tfm_aead;
1554  
1555  		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1556  		cra->base.cra_module = THIS_MODULE;
1557  		cra->base.cra_alignmask = 3;
1558  		cra->base.cra_priority = 300;
1559  
1560  		if (crypto_register_aead(cra))
1561  			dev_err(&pdev->dev, "Failed to register '%s'\n",
1562  				cra->base.cra_driver_name);
1563  		else
1564  			ixp4xx_aeads[i].registered = 1;
1565  	}
1566  	return 0;
1567  }
1568  
1569  static int ixp_crypto_remove(struct platform_device *pdev)
1570  {
1571  	int num = ARRAY_SIZE(ixp4xx_algos);
1572  	int i;
1573  
1574  	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1575  		if (ixp4xx_aeads[i].registered)
1576  			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
1577  	}
1578  
1579  	for (i = 0; i < num; i++) {
1580  		if (ixp4xx_algos[i].registered)
1581  			crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
1582  	}
1583  	release_ixp_crypto(&pdev->dev);
1584  
1585  	return 0;
1586  }
1587  static const struct of_device_id ixp4xx_crypto_of_match[] = {
1588  	{
1589  		.compatible = "intel,ixp4xx-crypto",
1590  	},
1591  	{},
1592  };
1593  
1594  static struct platform_driver ixp_crypto_driver = {
1595  	.probe = ixp_crypto_probe,
1596  	.remove = ixp_crypto_remove,
1597  	.driver = {
1598  		.name = "ixp4xx_crypto",
1599  		.of_match_table = ixp4xx_crypto_of_match,
1600  	},
1601  };
1602  module_platform_driver(ixp_crypto_driver);
1603  
1604  MODULE_LICENSE("GPL");
1605  MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1606  MODULE_DESCRIPTION("IXP4xx hardware crypto");
1607  
1608