10a625fd2SDavid S. Miller /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. 20a625fd2SDavid S. Miller * 3eb7caf35SDavid S. Miller * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net> 40a625fd2SDavid S. Miller */ 50a625fd2SDavid S. Miller 60a625fd2SDavid S. Miller #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 70a625fd2SDavid S. Miller 80a625fd2SDavid S. Miller #include <linux/kernel.h> 90a625fd2SDavid S. Miller #include <linux/module.h> 100a625fd2SDavid S. Miller #include <linux/of.h> 110a625fd2SDavid S. Miller #include <linux/of_device.h> 120a625fd2SDavid S. Miller #include <linux/cpumask.h> 130a625fd2SDavid S. Miller #include <linux/slab.h> 140a625fd2SDavid S. Miller #include <linux/interrupt.h> 150a625fd2SDavid S. Miller #include <linux/crypto.h> 160a625fd2SDavid S. Miller #include <crypto/md5.h> 170a625fd2SDavid S. Miller #include <crypto/sha.h> 180a625fd2SDavid S. Miller #include <crypto/aes.h> 190a625fd2SDavid S. Miller #include <crypto/des.h> 200a625fd2SDavid S. Miller #include <linux/mutex.h> 210a625fd2SDavid S. Miller #include <linux/delay.h> 220a625fd2SDavid S. Miller #include <linux/sched.h> 230a625fd2SDavid S. Miller 240a625fd2SDavid S. Miller #include <crypto/internal/hash.h> 250a625fd2SDavid S. Miller #include <crypto/scatterwalk.h> 260a625fd2SDavid S. Miller #include <crypto/algapi.h> 270a625fd2SDavid S. Miller 280a625fd2SDavid S. Miller #include <asm/hypervisor.h> 290a625fd2SDavid S. Miller #include <asm/mdesc.h> 300a625fd2SDavid S. Miller 310a625fd2SDavid S. Miller #include "n2_core.h" 320a625fd2SDavid S. Miller 330a625fd2SDavid S. Miller #define DRV_MODULE_NAME "n2_crypto" 34eb7caf35SDavid S. Miller #define DRV_MODULE_VERSION "0.2" 35eb7caf35SDavid S. Miller #define DRV_MODULE_RELDATE "July 28, 2011" 360a625fd2SDavid S. Miller 370a625fd2SDavid S. Miller static char version[] __devinitdata = 380a625fd2SDavid S. Miller DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 390a625fd2SDavid S. 
Miller 400a625fd2SDavid S. Miller MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 410a625fd2SDavid S. Miller MODULE_DESCRIPTION("Niagara2 Crypto driver"); 420a625fd2SDavid S. Miller MODULE_LICENSE("GPL"); 430a625fd2SDavid S. Miller MODULE_VERSION(DRV_MODULE_VERSION); 440a625fd2SDavid S. Miller 450a625fd2SDavid S. Miller #define N2_CRA_PRIORITY 300 460a625fd2SDavid S. Miller 470a625fd2SDavid S. Miller static DEFINE_MUTEX(spu_lock); 480a625fd2SDavid S. Miller 490a625fd2SDavid S. Miller struct spu_queue { 500a625fd2SDavid S. Miller cpumask_t sharing; 510a625fd2SDavid S. Miller unsigned long qhandle; 520a625fd2SDavid S. Miller 530a625fd2SDavid S. Miller spinlock_t lock; 540a625fd2SDavid S. Miller u8 q_type; 550a625fd2SDavid S. Miller void *q; 560a625fd2SDavid S. Miller unsigned long head; 570a625fd2SDavid S. Miller unsigned long tail; 580a625fd2SDavid S. Miller struct list_head jobs; 590a625fd2SDavid S. Miller 600a625fd2SDavid S. Miller unsigned long devino; 610a625fd2SDavid S. Miller 620a625fd2SDavid S. Miller char irq_name[32]; 630a625fd2SDavid S. Miller unsigned int irq; 640a625fd2SDavid S. Miller 650a625fd2SDavid S. Miller struct list_head list; 660a625fd2SDavid S. Miller }; 670a625fd2SDavid S. Miller 680a625fd2SDavid S. Miller static struct spu_queue **cpu_to_cwq; 690a625fd2SDavid S. Miller static struct spu_queue **cpu_to_mau; 700a625fd2SDavid S. Miller 710a625fd2SDavid S. Miller static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off) 720a625fd2SDavid S. Miller { 730a625fd2SDavid S. Miller if (q->q_type == HV_NCS_QTYPE_MAU) { 740a625fd2SDavid S. Miller off += MAU_ENTRY_SIZE; 750a625fd2SDavid S. Miller if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES)) 760a625fd2SDavid S. Miller off = 0; 770a625fd2SDavid S. Miller } else { 780a625fd2SDavid S. Miller off += CWQ_ENTRY_SIZE; 790a625fd2SDavid S. Miller if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES)) 800a625fd2SDavid S. Miller off = 0; 810a625fd2SDavid S. Miller } 820a625fd2SDavid S. 
Miller return off; 830a625fd2SDavid S. Miller } 840a625fd2SDavid S. Miller 850a625fd2SDavid S. Miller struct n2_request_common { 860a625fd2SDavid S. Miller struct list_head entry; 870a625fd2SDavid S. Miller unsigned int offset; 880a625fd2SDavid S. Miller }; 890a625fd2SDavid S. Miller #define OFFSET_NOT_RUNNING (~(unsigned int)0) 900a625fd2SDavid S. Miller 910a625fd2SDavid S. Miller /* An async job request records the final tail value it used in 920a625fd2SDavid S. Miller * n2_request_common->offset, test to see if that offset is in 930a625fd2SDavid S. Miller * the range old_head, new_head, inclusive. 940a625fd2SDavid S. Miller */ 950a625fd2SDavid S. Miller static inline bool job_finished(struct spu_queue *q, unsigned int offset, 960a625fd2SDavid S. Miller unsigned long old_head, unsigned long new_head) 970a625fd2SDavid S. Miller { 980a625fd2SDavid S. Miller if (old_head <= new_head) { 990a625fd2SDavid S. Miller if (offset > old_head && offset <= new_head) 1000a625fd2SDavid S. Miller return true; 1010a625fd2SDavid S. Miller } else { 1020a625fd2SDavid S. Miller if (offset > old_head || offset <= new_head) 1030a625fd2SDavid S. Miller return true; 1040a625fd2SDavid S. Miller } 1050a625fd2SDavid S. Miller return false; 1060a625fd2SDavid S. Miller } 1070a625fd2SDavid S. Miller 1080a625fd2SDavid S. Miller /* When the HEAD marker is unequal to the actual HEAD, we get 1090a625fd2SDavid S. Miller * a virtual device INO interrupt. We should process the 1100a625fd2SDavid S. Miller * completed CWQ entries and adjust the HEAD marker to clear 1110a625fd2SDavid S. Miller * the IRQ. 1120a625fd2SDavid S. Miller */ 1130a625fd2SDavid S. Miller static irqreturn_t cwq_intr(int irq, void *dev_id) 1140a625fd2SDavid S. Miller { 1150a625fd2SDavid S. Miller unsigned long off, new_head, hv_ret; 1160a625fd2SDavid S. Miller struct spu_queue *q = dev_id; 1170a625fd2SDavid S. Miller 1180a625fd2SDavid S. Miller pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n", 1190a625fd2SDavid S. 
Miller smp_processor_id(), q->qhandle); 1200a625fd2SDavid S. Miller 1210a625fd2SDavid S. Miller spin_lock(&q->lock); 1220a625fd2SDavid S. Miller 1230a625fd2SDavid S. Miller hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head); 1240a625fd2SDavid S. Miller 1250a625fd2SDavid S. Miller pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n", 1260a625fd2SDavid S. Miller smp_processor_id(), new_head, hv_ret); 1270a625fd2SDavid S. Miller 1280a625fd2SDavid S. Miller for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { 1290a625fd2SDavid S. Miller /* XXX ... XXX */ 1300a625fd2SDavid S. Miller } 1310a625fd2SDavid S. Miller 1320a625fd2SDavid S. Miller hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head); 1330a625fd2SDavid S. Miller if (hv_ret == HV_EOK) 1340a625fd2SDavid S. Miller q->head = new_head; 1350a625fd2SDavid S. Miller 1360a625fd2SDavid S. Miller spin_unlock(&q->lock); 1370a625fd2SDavid S. Miller 1380a625fd2SDavid S. Miller return IRQ_HANDLED; 1390a625fd2SDavid S. Miller } 1400a625fd2SDavid S. Miller 1410a625fd2SDavid S. Miller static irqreturn_t mau_intr(int irq, void *dev_id) 1420a625fd2SDavid S. Miller { 1430a625fd2SDavid S. Miller struct spu_queue *q = dev_id; 1440a625fd2SDavid S. Miller unsigned long head, hv_ret; 1450a625fd2SDavid S. Miller 1460a625fd2SDavid S. Miller spin_lock(&q->lock); 1470a625fd2SDavid S. Miller 1480a625fd2SDavid S. Miller pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n", 1490a625fd2SDavid S. Miller smp_processor_id(), q->qhandle); 1500a625fd2SDavid S. Miller 1510a625fd2SDavid S. Miller hv_ret = sun4v_ncs_gethead(q->qhandle, &head); 1520a625fd2SDavid S. Miller 1530a625fd2SDavid S. Miller pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n", 1540a625fd2SDavid S. Miller smp_processor_id(), head, hv_ret); 1550a625fd2SDavid S. Miller 1560a625fd2SDavid S. Miller sun4v_ncs_sethead_marker(q->qhandle, head); 1570a625fd2SDavid S. Miller 1580a625fd2SDavid S. Miller spin_unlock(&q->lock); 1590a625fd2SDavid S. Miller 1600a625fd2SDavid S. 
Miller return IRQ_HANDLED; 1610a625fd2SDavid S. Miller } 1620a625fd2SDavid S. Miller 1630a625fd2SDavid S. Miller static void *spu_queue_next(struct spu_queue *q, void *cur) 1640a625fd2SDavid S. Miller { 1650a625fd2SDavid S. Miller return q->q + spu_next_offset(q, cur - q->q); 1660a625fd2SDavid S. Miller } 1670a625fd2SDavid S. Miller 1680a625fd2SDavid S. Miller static int spu_queue_num_free(struct spu_queue *q) 1690a625fd2SDavid S. Miller { 1700a625fd2SDavid S. Miller unsigned long head = q->head; 1710a625fd2SDavid S. Miller unsigned long tail = q->tail; 1720a625fd2SDavid S. Miller unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES); 1730a625fd2SDavid S. Miller unsigned long diff; 1740a625fd2SDavid S. Miller 1750a625fd2SDavid S. Miller if (head > tail) 1760a625fd2SDavid S. Miller diff = head - tail; 1770a625fd2SDavid S. Miller else 1780a625fd2SDavid S. Miller diff = (end - tail) + head; 1790a625fd2SDavid S. Miller 1800a625fd2SDavid S. Miller return (diff / CWQ_ENTRY_SIZE) - 1; 1810a625fd2SDavid S. Miller } 1820a625fd2SDavid S. Miller 1830a625fd2SDavid S. Miller static void *spu_queue_alloc(struct spu_queue *q, int num_entries) 1840a625fd2SDavid S. Miller { 1850a625fd2SDavid S. Miller int avail = spu_queue_num_free(q); 1860a625fd2SDavid S. Miller 1870a625fd2SDavid S. Miller if (avail >= num_entries) 1880a625fd2SDavid S. Miller return q->q + q->tail; 1890a625fd2SDavid S. Miller 1900a625fd2SDavid S. Miller return NULL; 1910a625fd2SDavid S. Miller } 1920a625fd2SDavid S. Miller 1930a625fd2SDavid S. Miller static unsigned long spu_queue_submit(struct spu_queue *q, void *last) 1940a625fd2SDavid S. Miller { 1950a625fd2SDavid S. Miller unsigned long hv_ret, new_tail; 1960a625fd2SDavid S. Miller 1970a625fd2SDavid S. Miller new_tail = spu_next_offset(q, last - q->q); 1980a625fd2SDavid S. Miller 1990a625fd2SDavid S. Miller hv_ret = sun4v_ncs_settail(q->qhandle, new_tail); 2000a625fd2SDavid S. Miller if (hv_ret == HV_EOK) 2010a625fd2SDavid S. 
Miller q->tail = new_tail; 2020a625fd2SDavid S. Miller return hv_ret; 2030a625fd2SDavid S. Miller } 2040a625fd2SDavid S. Miller 2050a625fd2SDavid S. Miller static u64 control_word_base(unsigned int len, unsigned int hmac_key_len, 2060a625fd2SDavid S. Miller int enc_type, int auth_type, 2070a625fd2SDavid S. Miller unsigned int hash_len, 2080a625fd2SDavid S. Miller bool sfas, bool sob, bool eob, bool encrypt, 2090a625fd2SDavid S. Miller int opcode) 2100a625fd2SDavid S. Miller { 2110a625fd2SDavid S. Miller u64 word = (len - 1) & CONTROL_LEN; 2120a625fd2SDavid S. Miller 2130a625fd2SDavid S. Miller word |= ((u64) opcode << CONTROL_OPCODE_SHIFT); 2140a625fd2SDavid S. Miller word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT); 2150a625fd2SDavid S. Miller word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT); 2160a625fd2SDavid S. Miller if (sfas) 2170a625fd2SDavid S. Miller word |= CONTROL_STORE_FINAL_AUTH_STATE; 2180a625fd2SDavid S. Miller if (sob) 2190a625fd2SDavid S. Miller word |= CONTROL_START_OF_BLOCK; 2200a625fd2SDavid S. Miller if (eob) 2210a625fd2SDavid S. Miller word |= CONTROL_END_OF_BLOCK; 2220a625fd2SDavid S. Miller if (encrypt) 2230a625fd2SDavid S. Miller word |= CONTROL_ENCRYPT; 2240a625fd2SDavid S. Miller if (hmac_key_len) 2250a625fd2SDavid S. Miller word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT; 2260a625fd2SDavid S. Miller if (hash_len) 2270a625fd2SDavid S. Miller word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT; 2280a625fd2SDavid S. Miller 2290a625fd2SDavid S. Miller return word; 2300a625fd2SDavid S. Miller } 2310a625fd2SDavid S. Miller 2320a625fd2SDavid S. Miller #if 0 2330a625fd2SDavid S. Miller static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) 2340a625fd2SDavid S. Miller { 2350a625fd2SDavid S. Miller if (this_len >= 64 || 2360a625fd2SDavid S. Miller qp->head != qp->tail) 2370a625fd2SDavid S. Miller return true; 2380a625fd2SDavid S. Miller return false; 2390a625fd2SDavid S. Miller } 2400a625fd2SDavid S. 
Miller #endif 2410a625fd2SDavid S. Miller 2423a2c0346SDavid S. Miller struct n2_ahash_alg { 2433a2c0346SDavid S. Miller struct list_head entry; 2443a2c0346SDavid S. Miller const char *hash_zero; 2453a2c0346SDavid S. Miller const u32 *hash_init; 2463a2c0346SDavid S. Miller u8 hw_op_hashsz; 2473a2c0346SDavid S. Miller u8 digest_size; 2483a2c0346SDavid S. Miller u8 auth_type; 249dc4ccfd1SDavid S. Miller u8 hmac_type; 2503a2c0346SDavid S. Miller struct ahash_alg alg; 2513a2c0346SDavid S. Miller }; 2523a2c0346SDavid S. Miller 2533a2c0346SDavid S. Miller static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm) 2543a2c0346SDavid S. Miller { 2553a2c0346SDavid S. Miller struct crypto_alg *alg = tfm->__crt_alg; 2563a2c0346SDavid S. Miller struct ahash_alg *ahash_alg; 2573a2c0346SDavid S. Miller 2583a2c0346SDavid S. Miller ahash_alg = container_of(alg, struct ahash_alg, halg.base); 2593a2c0346SDavid S. Miller 2603a2c0346SDavid S. Miller return container_of(ahash_alg, struct n2_ahash_alg, alg); 2613a2c0346SDavid S. Miller } 2623a2c0346SDavid S. Miller 263dc4ccfd1SDavid S. Miller struct n2_hmac_alg { 264dc4ccfd1SDavid S. Miller const char *child_alg; 265dc4ccfd1SDavid S. Miller struct n2_ahash_alg derived; 266dc4ccfd1SDavid S. Miller }; 267dc4ccfd1SDavid S. Miller 268dc4ccfd1SDavid S. Miller static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm) 269dc4ccfd1SDavid S. Miller { 270dc4ccfd1SDavid S. Miller struct crypto_alg *alg = tfm->__crt_alg; 271dc4ccfd1SDavid S. Miller struct ahash_alg *ahash_alg; 272dc4ccfd1SDavid S. Miller 273dc4ccfd1SDavid S. Miller ahash_alg = container_of(alg, struct ahash_alg, halg.base); 274dc4ccfd1SDavid S. Miller 275dc4ccfd1SDavid S. Miller return container_of(ahash_alg, struct n2_hmac_alg, derived.alg); 276dc4ccfd1SDavid S. Miller } 277dc4ccfd1SDavid S. Miller 2780a625fd2SDavid S. Miller struct n2_hash_ctx { 279c9aa55e5SDavid S. Miller struct crypto_ahash *fallback_tfm; 280c9aa55e5SDavid S. Miller }; 2810a625fd2SDavid S. 
Miller 282dc4ccfd1SDavid S. Miller #define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */ 283dc4ccfd1SDavid S. Miller 284dc4ccfd1SDavid S. Miller struct n2_hmac_ctx { 285dc4ccfd1SDavid S. Miller struct n2_hash_ctx base; 286dc4ccfd1SDavid S. Miller 287dc4ccfd1SDavid S. Miller struct crypto_shash *child_shash; 288dc4ccfd1SDavid S. Miller 289dc4ccfd1SDavid S. Miller int hash_key_len; 290dc4ccfd1SDavid S. Miller unsigned char hash_key[N2_HASH_KEY_MAX]; 291dc4ccfd1SDavid S. Miller }; 292dc4ccfd1SDavid S. Miller 293c9aa55e5SDavid S. Miller struct n2_hash_req_ctx { 2940a625fd2SDavid S. Miller union { 2950a625fd2SDavid S. Miller struct md5_state md5; 2960a625fd2SDavid S. Miller struct sha1_state sha1; 2970a625fd2SDavid S. Miller struct sha256_state sha256; 2980a625fd2SDavid S. Miller } u; 2990a625fd2SDavid S. Miller 300c9aa55e5SDavid S. Miller struct ahash_request fallback_req; 3010a625fd2SDavid S. Miller }; 3020a625fd2SDavid S. Miller 3030a625fd2SDavid S. Miller static int n2_hash_async_init(struct ahash_request *req) 3040a625fd2SDavid S. Miller { 305c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 3060a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3070a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3080a625fd2SDavid S. Miller 309c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 310c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 3110a625fd2SDavid S. Miller 312c9aa55e5SDavid S. Miller return crypto_ahash_init(&rctx->fallback_req); 3130a625fd2SDavid S. Miller } 3140a625fd2SDavid S. Miller 3150a625fd2SDavid S. Miller static int n2_hash_async_update(struct ahash_request *req) 3160a625fd2SDavid S. Miller { 317c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 3180a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3190a625fd2SDavid S. 
Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3200a625fd2SDavid S. Miller 321c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 322c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 323c9aa55e5SDavid S. Miller rctx->fallback_req.nbytes = req->nbytes; 324c9aa55e5SDavid S. Miller rctx->fallback_req.src = req->src; 3250a625fd2SDavid S. Miller 326c9aa55e5SDavid S. Miller return crypto_ahash_update(&rctx->fallback_req); 3270a625fd2SDavid S. Miller } 3280a625fd2SDavid S. Miller 3290a625fd2SDavid S. Miller static int n2_hash_async_final(struct ahash_request *req) 3300a625fd2SDavid S. Miller { 331c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 3320a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3330a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3340a625fd2SDavid S. Miller 335c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 336c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 337c9aa55e5SDavid S. Miller rctx->fallback_req.result = req->result; 3380a625fd2SDavid S. Miller 339c9aa55e5SDavid S. Miller return crypto_ahash_final(&rctx->fallback_req); 3400a625fd2SDavid S. Miller } 3410a625fd2SDavid S. Miller 3420a625fd2SDavid S. Miller static int n2_hash_async_finup(struct ahash_request *req) 3430a625fd2SDavid S. Miller { 344c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 3450a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3460a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3470a625fd2SDavid S. Miller 348c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 349c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 350c9aa55e5SDavid S. 
Miller rctx->fallback_req.nbytes = req->nbytes; 351c9aa55e5SDavid S. Miller rctx->fallback_req.src = req->src; 352c9aa55e5SDavid S. Miller rctx->fallback_req.result = req->result; 3530a625fd2SDavid S. Miller 354c9aa55e5SDavid S. Miller return crypto_ahash_finup(&rctx->fallback_req); 3550a625fd2SDavid S. Miller } 3560a625fd2SDavid S. Miller 3570a625fd2SDavid S. Miller static int n2_hash_cra_init(struct crypto_tfm *tfm) 3580a625fd2SDavid S. Miller { 3590a625fd2SDavid S. Miller const char *fallback_driver_name = tfm->__crt_alg->cra_name; 3600a625fd2SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 3610a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 3620a625fd2SDavid S. Miller struct crypto_ahash *fallback_tfm; 3630a625fd2SDavid S. Miller int err; 3640a625fd2SDavid S. Miller 3650a625fd2SDavid S. Miller fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, 3660a625fd2SDavid S. Miller CRYPTO_ALG_NEED_FALLBACK); 3670a625fd2SDavid S. Miller if (IS_ERR(fallback_tfm)) { 3680a625fd2SDavid S. Miller pr_warning("Fallback driver '%s' could not be loaded!\n", 3690a625fd2SDavid S. Miller fallback_driver_name); 3700a625fd2SDavid S. Miller err = PTR_ERR(fallback_tfm); 3710a625fd2SDavid S. Miller goto out; 3720a625fd2SDavid S. Miller } 3730a625fd2SDavid S. Miller 374c9aa55e5SDavid S. Miller crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + 375c9aa55e5SDavid S. Miller crypto_ahash_reqsize(fallback_tfm))); 376c9aa55e5SDavid S. Miller 377c9aa55e5SDavid S. Miller ctx->fallback_tfm = fallback_tfm; 3780a625fd2SDavid S. Miller return 0; 3790a625fd2SDavid S. Miller 3800a625fd2SDavid S. Miller out: 3810a625fd2SDavid S. Miller return err; 3820a625fd2SDavid S. Miller } 3830a625fd2SDavid S. Miller 3840a625fd2SDavid S. Miller static void n2_hash_cra_exit(struct crypto_tfm *tfm) 3850a625fd2SDavid S. Miller { 3860a625fd2SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 3870a625fd2SDavid S. 
Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 3880a625fd2SDavid S. Miller 389c9aa55e5SDavid S. Miller crypto_free_ahash(ctx->fallback_tfm); 3900a625fd2SDavid S. Miller } 3910a625fd2SDavid S. Miller 392dc4ccfd1SDavid S. Miller static int n2_hmac_cra_init(struct crypto_tfm *tfm) 393dc4ccfd1SDavid S. Miller { 394dc4ccfd1SDavid S. Miller const char *fallback_driver_name = tfm->__crt_alg->cra_name; 395dc4ccfd1SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 396dc4ccfd1SDavid S. Miller struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); 397dc4ccfd1SDavid S. Miller struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm); 398dc4ccfd1SDavid S. Miller struct crypto_ahash *fallback_tfm; 399dc4ccfd1SDavid S. Miller struct crypto_shash *child_shash; 400dc4ccfd1SDavid S. Miller int err; 401dc4ccfd1SDavid S. Miller 402dc4ccfd1SDavid S. Miller fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, 403dc4ccfd1SDavid S. Miller CRYPTO_ALG_NEED_FALLBACK); 404dc4ccfd1SDavid S. Miller if (IS_ERR(fallback_tfm)) { 405dc4ccfd1SDavid S. Miller pr_warning("Fallback driver '%s' could not be loaded!\n", 406dc4ccfd1SDavid S. Miller fallback_driver_name); 407dc4ccfd1SDavid S. Miller err = PTR_ERR(fallback_tfm); 408dc4ccfd1SDavid S. Miller goto out; 409dc4ccfd1SDavid S. Miller } 410dc4ccfd1SDavid S. Miller 411dc4ccfd1SDavid S. Miller child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0); 412dc4ccfd1SDavid S. Miller if (IS_ERR(child_shash)) { 413dc4ccfd1SDavid S. Miller pr_warning("Child shash '%s' could not be loaded!\n", 414dc4ccfd1SDavid S. Miller n2alg->child_alg); 415dc4ccfd1SDavid S. Miller err = PTR_ERR(child_shash); 416dc4ccfd1SDavid S. Miller goto out_free_fallback; 417dc4ccfd1SDavid S. Miller } 418dc4ccfd1SDavid S. Miller 419dc4ccfd1SDavid S. Miller crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + 420dc4ccfd1SDavid S. Miller crypto_ahash_reqsize(fallback_tfm))); 421dc4ccfd1SDavid S. Miller 422dc4ccfd1SDavid S. 
Miller ctx->child_shash = child_shash; 423dc4ccfd1SDavid S. Miller ctx->base.fallback_tfm = fallback_tfm; 424dc4ccfd1SDavid S. Miller return 0; 425dc4ccfd1SDavid S. Miller 426dc4ccfd1SDavid S. Miller out_free_fallback: 427dc4ccfd1SDavid S. Miller crypto_free_ahash(fallback_tfm); 428dc4ccfd1SDavid S. Miller 429dc4ccfd1SDavid S. Miller out: 430dc4ccfd1SDavid S. Miller return err; 431dc4ccfd1SDavid S. Miller } 432dc4ccfd1SDavid S. Miller 433dc4ccfd1SDavid S. Miller static void n2_hmac_cra_exit(struct crypto_tfm *tfm) 434dc4ccfd1SDavid S. Miller { 435dc4ccfd1SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 436dc4ccfd1SDavid S. Miller struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); 437dc4ccfd1SDavid S. Miller 438dc4ccfd1SDavid S. Miller crypto_free_ahash(ctx->base.fallback_tfm); 439dc4ccfd1SDavid S. Miller crypto_free_shash(ctx->child_shash); 440dc4ccfd1SDavid S. Miller } 441dc4ccfd1SDavid S. Miller 442dc4ccfd1SDavid S. Miller static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, 443dc4ccfd1SDavid S. Miller unsigned int keylen) 444dc4ccfd1SDavid S. Miller { 445dc4ccfd1SDavid S. Miller struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); 446dc4ccfd1SDavid S. Miller struct crypto_shash *child_shash = ctx->child_shash; 447dc4ccfd1SDavid S. Miller struct crypto_ahash *fallback_tfm; 448dc4ccfd1SDavid S. Miller struct { 449dc4ccfd1SDavid S. Miller struct shash_desc shash; 450dc4ccfd1SDavid S. Miller char ctx[crypto_shash_descsize(child_shash)]; 451dc4ccfd1SDavid S. Miller } desc; 452dc4ccfd1SDavid S. Miller int err, bs, ds; 453dc4ccfd1SDavid S. Miller 454dc4ccfd1SDavid S. Miller fallback_tfm = ctx->base.fallback_tfm; 455dc4ccfd1SDavid S. Miller err = crypto_ahash_setkey(fallback_tfm, key, keylen); 456dc4ccfd1SDavid S. Miller if (err) 457dc4ccfd1SDavid S. Miller return err; 458dc4ccfd1SDavid S. Miller 459dc4ccfd1SDavid S. Miller desc.shash.tfm = child_shash; 460dc4ccfd1SDavid S. 
Miller desc.shash.flags = crypto_ahash_get_flags(tfm) & 461dc4ccfd1SDavid S. Miller CRYPTO_TFM_REQ_MAY_SLEEP; 462dc4ccfd1SDavid S. Miller 463dc4ccfd1SDavid S. Miller bs = crypto_shash_blocksize(child_shash); 464dc4ccfd1SDavid S. Miller ds = crypto_shash_digestsize(child_shash); 465dc4ccfd1SDavid S. Miller BUG_ON(ds > N2_HASH_KEY_MAX); 466dc4ccfd1SDavid S. Miller if (keylen > bs) { 467dc4ccfd1SDavid S. Miller err = crypto_shash_digest(&desc.shash, key, keylen, 468dc4ccfd1SDavid S. Miller ctx->hash_key); 469dc4ccfd1SDavid S. Miller if (err) 470dc4ccfd1SDavid S. Miller return err; 471dc4ccfd1SDavid S. Miller keylen = ds; 472dc4ccfd1SDavid S. Miller } else if (keylen <= N2_HASH_KEY_MAX) 473dc4ccfd1SDavid S. Miller memcpy(ctx->hash_key, key, keylen); 474dc4ccfd1SDavid S. Miller 475dc4ccfd1SDavid S. Miller ctx->hash_key_len = keylen; 476dc4ccfd1SDavid S. Miller 477dc4ccfd1SDavid S. Miller return err; 478dc4ccfd1SDavid S. Miller } 479dc4ccfd1SDavid S. Miller 4800a625fd2SDavid S. Miller static unsigned long wait_for_tail(struct spu_queue *qp) 4810a625fd2SDavid S. Miller { 4820a625fd2SDavid S. Miller unsigned long head, hv_ret; 4830a625fd2SDavid S. Miller 4840a625fd2SDavid S. Miller do { 4850a625fd2SDavid S. Miller hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); 4860a625fd2SDavid S. Miller if (hv_ret != HV_EOK) { 4870a625fd2SDavid S. Miller pr_err("Hypervisor error on gethead\n"); 4880a625fd2SDavid S. Miller break; 4890a625fd2SDavid S. Miller } 4900a625fd2SDavid S. Miller if (head == qp->tail) { 4910a625fd2SDavid S. Miller qp->head = head; 4920a625fd2SDavid S. Miller break; 4930a625fd2SDavid S. Miller } 4940a625fd2SDavid S. Miller } while (1); 4950a625fd2SDavid S. Miller return hv_ret; 4960a625fd2SDavid S. Miller } 4970a625fd2SDavid S. Miller 4980a625fd2SDavid S. Miller static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, 4990a625fd2SDavid S. Miller struct cwq_initial_entry *ent) 5000a625fd2SDavid S. Miller { 5010a625fd2SDavid S. 
Miller unsigned long hv_ret = spu_queue_submit(qp, ent); 5020a625fd2SDavid S. Miller 5030a625fd2SDavid S. Miller if (hv_ret == HV_EOK) 5040a625fd2SDavid S. Miller hv_ret = wait_for_tail(qp); 5050a625fd2SDavid S. Miller 5060a625fd2SDavid S. Miller return hv_ret; 5070a625fd2SDavid S. Miller } 5080a625fd2SDavid S. Miller 5093a2c0346SDavid S. Miller static int n2_do_async_digest(struct ahash_request *req, 5100a625fd2SDavid S. Miller unsigned int auth_type, unsigned int digest_size, 511dc4ccfd1SDavid S. Miller unsigned int result_size, void *hash_loc, 512dc4ccfd1SDavid S. Miller unsigned long auth_key, unsigned int auth_key_len) 5130a625fd2SDavid S. Miller { 5140a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 5150a625fd2SDavid S. Miller struct cwq_initial_entry *ent; 5160a625fd2SDavid S. Miller struct crypto_hash_walk walk; 5170a625fd2SDavid S. Miller struct spu_queue *qp; 5180a625fd2SDavid S. Miller unsigned long flags; 5190a625fd2SDavid S. Miller int err = -ENODEV; 5200a625fd2SDavid S. Miller int nbytes, cpu; 5210a625fd2SDavid S. Miller 5220a625fd2SDavid S. Miller /* The total effective length of the operation may not 5230a625fd2SDavid S. Miller * exceed 2^16. 5240a625fd2SDavid S. Miller */ 5250a625fd2SDavid S. Miller if (unlikely(req->nbytes > (1 << 16))) { 526c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 52765a23d67SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 5280a625fd2SDavid S. Miller 529c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 530c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = 531c9aa55e5SDavid S. Miller req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 532c9aa55e5SDavid S. Miller rctx->fallback_req.nbytes = req->nbytes; 533c9aa55e5SDavid S. Miller rctx->fallback_req.src = req->src; 534c9aa55e5SDavid S. Miller rctx->fallback_req.result = req->result; 535c9aa55e5SDavid S. Miller 536c9aa55e5SDavid S. 
Miller return crypto_ahash_digest(&rctx->fallback_req); 5370a625fd2SDavid S. Miller } 5380a625fd2SDavid S. Miller 5390a625fd2SDavid S. Miller nbytes = crypto_hash_walk_first(req, &walk); 5400a625fd2SDavid S. Miller 5410a625fd2SDavid S. Miller cpu = get_cpu(); 5420a625fd2SDavid S. Miller qp = cpu_to_cwq[cpu]; 5430a625fd2SDavid S. Miller if (!qp) 5440a625fd2SDavid S. Miller goto out; 5450a625fd2SDavid S. Miller 5460a625fd2SDavid S. Miller spin_lock_irqsave(&qp->lock, flags); 5470a625fd2SDavid S. Miller 5480a625fd2SDavid S. Miller /* XXX can do better, improve this later by doing a by-hand scatterlist 5490a625fd2SDavid S. Miller * XXX walk, etc. 5500a625fd2SDavid S. Miller */ 5510a625fd2SDavid S. Miller ent = qp->q + qp->tail; 5520a625fd2SDavid S. Miller 553dc4ccfd1SDavid S. Miller ent->control = control_word_base(nbytes, auth_key_len, 0, 5540a625fd2SDavid S. Miller auth_type, digest_size, 5550a625fd2SDavid S. Miller false, true, false, false, 5560a625fd2SDavid S. Miller OPCODE_INPLACE_BIT | 5570a625fd2SDavid S. Miller OPCODE_AUTH_MAC); 5580a625fd2SDavid S. Miller ent->src_addr = __pa(walk.data); 559dc4ccfd1SDavid S. Miller ent->auth_key_addr = auth_key; 5600a625fd2SDavid S. Miller ent->auth_iv_addr = __pa(hash_loc); 5610a625fd2SDavid S. Miller ent->final_auth_state_addr = 0UL; 5620a625fd2SDavid S. Miller ent->enc_key_addr = 0UL; 5630a625fd2SDavid S. Miller ent->enc_iv_addr = 0UL; 5640a625fd2SDavid S. Miller ent->dest_addr = __pa(hash_loc); 5650a625fd2SDavid S. Miller 5660a625fd2SDavid S. Miller nbytes = crypto_hash_walk_done(&walk, 0); 5670a625fd2SDavid S. Miller while (nbytes > 0) { 5680a625fd2SDavid S. Miller ent = spu_queue_next(qp, ent); 5690a625fd2SDavid S. Miller 5700a625fd2SDavid S. Miller ent->control = (nbytes - 1); 5710a625fd2SDavid S. Miller ent->src_addr = __pa(walk.data); 5720a625fd2SDavid S. Miller ent->auth_key_addr = 0UL; 5730a625fd2SDavid S. Miller ent->auth_iv_addr = 0UL; 5740a625fd2SDavid S. 
Miller ent->final_auth_state_addr = 0UL; 5750a625fd2SDavid S. Miller ent->enc_key_addr = 0UL; 5760a625fd2SDavid S. Miller ent->enc_iv_addr = 0UL; 5770a625fd2SDavid S. Miller ent->dest_addr = 0UL; 5780a625fd2SDavid S. Miller 5790a625fd2SDavid S. Miller nbytes = crypto_hash_walk_done(&walk, 0); 5800a625fd2SDavid S. Miller } 5810a625fd2SDavid S. Miller ent->control |= CONTROL_END_OF_BLOCK; 5820a625fd2SDavid S. Miller 5830a625fd2SDavid S. Miller if (submit_and_wait_for_tail(qp, ent) != HV_EOK) 5840a625fd2SDavid S. Miller err = -EINVAL; 5850a625fd2SDavid S. Miller else 5860a625fd2SDavid S. Miller err = 0; 5870a625fd2SDavid S. Miller 5880a625fd2SDavid S. Miller spin_unlock_irqrestore(&qp->lock, flags); 5890a625fd2SDavid S. Miller 5900a625fd2SDavid S. Miller if (!err) 5910a625fd2SDavid S. Miller memcpy(req->result, hash_loc, result_size); 5920a625fd2SDavid S. Miller out: 5930a625fd2SDavid S. Miller put_cpu(); 5940a625fd2SDavid S. Miller 5950a625fd2SDavid S. Miller return err; 5960a625fd2SDavid S. Miller } 5970a625fd2SDavid S. Miller 5983a2c0346SDavid S. Miller static int n2_hash_async_digest(struct ahash_request *req) 5990a625fd2SDavid S. Miller { 6003a2c0346SDavid S. Miller struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm); 601c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 6023a2c0346SDavid S. Miller int ds; 6030a625fd2SDavid S. Miller 6043a2c0346SDavid S. Miller ds = n2alg->digest_size; 6050a625fd2SDavid S. Miller if (unlikely(req->nbytes == 0)) { 6063a2c0346SDavid S. Miller memcpy(req->result, n2alg->hash_zero, ds); 6070a625fd2SDavid S. Miller return 0; 6080a625fd2SDavid S. Miller } 6093a2c0346SDavid S. Miller memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz); 6100a625fd2SDavid S. Miller 6113a2c0346SDavid S. Miller return n2_do_async_digest(req, n2alg->auth_type, 6123a2c0346SDavid S. Miller n2alg->hw_op_hashsz, ds, 613dc4ccfd1SDavid S. Miller &rctx->u, 0UL, 0); 614dc4ccfd1SDavid S. Miller } 615dc4ccfd1SDavid S. 
Miller 616dc4ccfd1SDavid S. Miller static int n2_hmac_async_digest(struct ahash_request *req) 617dc4ccfd1SDavid S. Miller { 618dc4ccfd1SDavid S. Miller struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm); 619dc4ccfd1SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 620dc4ccfd1SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 621dc4ccfd1SDavid S. Miller struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); 622dc4ccfd1SDavid S. Miller int ds; 623dc4ccfd1SDavid S. Miller 624dc4ccfd1SDavid S. Miller ds = n2alg->derived.digest_size; 625dc4ccfd1SDavid S. Miller if (unlikely(req->nbytes == 0) || 626dc4ccfd1SDavid S. Miller unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) { 627dc4ccfd1SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 628dc4ccfd1SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 629dc4ccfd1SDavid S. Miller 630dc4ccfd1SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 631dc4ccfd1SDavid S. Miller rctx->fallback_req.base.flags = 632dc4ccfd1SDavid S. Miller req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 633dc4ccfd1SDavid S. Miller rctx->fallback_req.nbytes = req->nbytes; 634dc4ccfd1SDavid S. Miller rctx->fallback_req.src = req->src; 635dc4ccfd1SDavid S. Miller rctx->fallback_req.result = req->result; 636dc4ccfd1SDavid S. Miller 637dc4ccfd1SDavid S. Miller return crypto_ahash_digest(&rctx->fallback_req); 638dc4ccfd1SDavid S. Miller } 639dc4ccfd1SDavid S. Miller memcpy(&rctx->u, n2alg->derived.hash_init, 640dc4ccfd1SDavid S. Miller n2alg->derived.hw_op_hashsz); 641dc4ccfd1SDavid S. Miller 642dc4ccfd1SDavid S. Miller return n2_do_async_digest(req, n2alg->derived.hmac_type, 643dc4ccfd1SDavid S. Miller n2alg->derived.hw_op_hashsz, ds, 644dc4ccfd1SDavid S. Miller &rctx->u, 645dc4ccfd1SDavid S. Miller __pa(&ctx->hash_key), 646dc4ccfd1SDavid S. Miller ctx->hash_key_len); 6470a625fd2SDavid S. Miller } 6480a625fd2SDavid S. Miller 6490a625fd2SDavid S. 
Miller struct n2_cipher_context { 6500a625fd2SDavid S. Miller int key_len; 6510a625fd2SDavid S. Miller int enc_type; 6520a625fd2SDavid S. Miller union { 6530a625fd2SDavid S. Miller u8 aes[AES_MAX_KEY_SIZE]; 6540a625fd2SDavid S. Miller u8 des[DES_KEY_SIZE]; 6550a625fd2SDavid S. Miller u8 des3[3 * DES_KEY_SIZE]; 6560a625fd2SDavid S. Miller u8 arc4[258]; /* S-box, X, Y */ 6570a625fd2SDavid S. Miller } key; 6580a625fd2SDavid S. Miller }; 6590a625fd2SDavid S. Miller 6600a625fd2SDavid S. Miller #define N2_CHUNK_ARR_LEN 16 6610a625fd2SDavid S. Miller 6620a625fd2SDavid S. Miller struct n2_crypto_chunk { 6630a625fd2SDavid S. Miller struct list_head entry; 6640a625fd2SDavid S. Miller unsigned long iv_paddr : 44; 6650a625fd2SDavid S. Miller unsigned long arr_len : 20; 6660a625fd2SDavid S. Miller unsigned long dest_paddr; 6670a625fd2SDavid S. Miller unsigned long dest_final; 6680a625fd2SDavid S. Miller struct { 6690a625fd2SDavid S. Miller unsigned long src_paddr : 44; 6700a625fd2SDavid S. Miller unsigned long src_len : 20; 6710a625fd2SDavid S. Miller } arr[N2_CHUNK_ARR_LEN]; 6720a625fd2SDavid S. Miller }; 6730a625fd2SDavid S. Miller 6740a625fd2SDavid S. Miller struct n2_request_context { 6750a625fd2SDavid S. Miller struct ablkcipher_walk walk; 6760a625fd2SDavid S. Miller struct list_head chunk_list; 6770a625fd2SDavid S. Miller struct n2_crypto_chunk chunk; 6780a625fd2SDavid S. Miller u8 temp_iv[16]; 6790a625fd2SDavid S. Miller }; 6800a625fd2SDavid S. Miller 6810a625fd2SDavid S. Miller /* The SPU allows some level of flexibility for partial cipher blocks 6820a625fd2SDavid S. Miller * being specified in a descriptor. 6830a625fd2SDavid S. Miller * 6840a625fd2SDavid S. Miller * It merely requires that every descriptor's length field is at least 6850a625fd2SDavid S. Miller * as large as the cipher block size. This means that a cipher block 6860a625fd2SDavid S. Miller * can span at most 2 descriptors. However, this does not allow a 6870a625fd2SDavid S. 
Miller * partial block to span into the final descriptor as that would 6880a625fd2SDavid S. Miller * violate the rule (since every descriptor's length must be at lest 6890a625fd2SDavid S. Miller * the block size). So, for example, assuming an 8 byte block size: 6900a625fd2SDavid S. Miller * 6910a625fd2SDavid S. Miller * 0xe --> 0xa --> 0x8 6920a625fd2SDavid S. Miller * 6930a625fd2SDavid S. Miller * is a valid length sequence, whereas: 6940a625fd2SDavid S. Miller * 6950a625fd2SDavid S. Miller * 0xe --> 0xb --> 0x7 6960a625fd2SDavid S. Miller * 6970a625fd2SDavid S. Miller * is not a valid sequence. 6980a625fd2SDavid S. Miller */ 6990a625fd2SDavid S. Miller 7000a625fd2SDavid S. Miller struct n2_cipher_alg { 7010a625fd2SDavid S. Miller struct list_head entry; 7020a625fd2SDavid S. Miller u8 enc_type; 7030a625fd2SDavid S. Miller struct crypto_alg alg; 7040a625fd2SDavid S. Miller }; 7050a625fd2SDavid S. Miller 7060a625fd2SDavid S. Miller static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm) 7070a625fd2SDavid S. Miller { 7080a625fd2SDavid S. Miller struct crypto_alg *alg = tfm->__crt_alg; 7090a625fd2SDavid S. Miller 7100a625fd2SDavid S. Miller return container_of(alg, struct n2_cipher_alg, alg); 7110a625fd2SDavid S. Miller } 7120a625fd2SDavid S. Miller 7130a625fd2SDavid S. Miller struct n2_cipher_request_context { 7140a625fd2SDavid S. Miller struct ablkcipher_walk walk; 7150a625fd2SDavid S. Miller }; 7160a625fd2SDavid S. Miller 7170a625fd2SDavid S. Miller static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 7180a625fd2SDavid S. Miller unsigned int keylen) 7190a625fd2SDavid S. Miller { 7200a625fd2SDavid S. Miller struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 7210a625fd2SDavid S. Miller struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 7220a625fd2SDavid S. Miller struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 7230a625fd2SDavid S. Miller 7240a625fd2SDavid S. 
Miller ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK); 7250a625fd2SDavid S. Miller 7260a625fd2SDavid S. Miller switch (keylen) { 7270a625fd2SDavid S. Miller case AES_KEYSIZE_128: 7280a625fd2SDavid S. Miller ctx->enc_type |= ENC_TYPE_ALG_AES128; 7290a625fd2SDavid S. Miller break; 7300a625fd2SDavid S. Miller case AES_KEYSIZE_192: 7310a625fd2SDavid S. Miller ctx->enc_type |= ENC_TYPE_ALG_AES192; 7320a625fd2SDavid S. Miller break; 7330a625fd2SDavid S. Miller case AES_KEYSIZE_256: 7340a625fd2SDavid S. Miller ctx->enc_type |= ENC_TYPE_ALG_AES256; 7350a625fd2SDavid S. Miller break; 7360a625fd2SDavid S. Miller default: 7370a625fd2SDavid S. Miller crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 7380a625fd2SDavid S. Miller return -EINVAL; 7390a625fd2SDavid S. Miller } 7400a625fd2SDavid S. Miller 7410a625fd2SDavid S. Miller ctx->key_len = keylen; 7420a625fd2SDavid S. Miller memcpy(ctx->key.aes, key, keylen); 7430a625fd2SDavid S. Miller return 0; 7440a625fd2SDavid S. Miller } 7450a625fd2SDavid S. Miller 7460a625fd2SDavid S. Miller static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 7470a625fd2SDavid S. Miller unsigned int keylen) 7480a625fd2SDavid S. Miller { 7490a625fd2SDavid S. Miller struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 7500a625fd2SDavid S. Miller struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 7510a625fd2SDavid S. Miller struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 7520a625fd2SDavid S. Miller u32 tmp[DES_EXPKEY_WORDS]; 7530a625fd2SDavid S. Miller int err; 7540a625fd2SDavid S. Miller 7550a625fd2SDavid S. Miller ctx->enc_type = n2alg->enc_type; 7560a625fd2SDavid S. Miller 7570a625fd2SDavid S. Miller if (keylen != DES_KEY_SIZE) { 7580a625fd2SDavid S. Miller crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 7590a625fd2SDavid S. Miller return -EINVAL; 7600a625fd2SDavid S. Miller } 7610a625fd2SDavid S. Miller 7620a625fd2SDavid S. Miller err = des_ekey(tmp, key); 7630a625fd2SDavid S. 
Miller if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { 7640a625fd2SDavid S. Miller tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 7650a625fd2SDavid S. Miller return -EINVAL; 7660a625fd2SDavid S. Miller } 7670a625fd2SDavid S. Miller 7680a625fd2SDavid S. Miller ctx->key_len = keylen; 7690a625fd2SDavid S. Miller memcpy(ctx->key.des, key, keylen); 7700a625fd2SDavid S. Miller return 0; 7710a625fd2SDavid S. Miller } 7720a625fd2SDavid S. Miller 7730a625fd2SDavid S. Miller static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 7740a625fd2SDavid S. Miller unsigned int keylen) 7750a625fd2SDavid S. Miller { 7760a625fd2SDavid S. Miller struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 7770a625fd2SDavid S. Miller struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 7780a625fd2SDavid S. Miller struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 7790a625fd2SDavid S. Miller 7800a625fd2SDavid S. Miller ctx->enc_type = n2alg->enc_type; 7810a625fd2SDavid S. Miller 7820a625fd2SDavid S. Miller if (keylen != (3 * DES_KEY_SIZE)) { 7830a625fd2SDavid S. Miller crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 7840a625fd2SDavid S. Miller return -EINVAL; 7850a625fd2SDavid S. Miller } 7860a625fd2SDavid S. Miller ctx->key_len = keylen; 7870a625fd2SDavid S. Miller memcpy(ctx->key.des3, key, keylen); 7880a625fd2SDavid S. Miller return 0; 7890a625fd2SDavid S. Miller } 7900a625fd2SDavid S. Miller 7910a625fd2SDavid S. Miller static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 7920a625fd2SDavid S. Miller unsigned int keylen) 7930a625fd2SDavid S. Miller { 7940a625fd2SDavid S. Miller struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 7950a625fd2SDavid S. Miller struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 7960a625fd2SDavid S. Miller struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 7970a625fd2SDavid S. Miller u8 *s = ctx->key.arc4; 7980a625fd2SDavid S. Miller u8 *x = s + 256; 7990a625fd2SDavid S. 
Miller u8 *y = x + 1; 8000a625fd2SDavid S. Miller int i, j, k; 8010a625fd2SDavid S. Miller 8020a625fd2SDavid S. Miller ctx->enc_type = n2alg->enc_type; 8030a625fd2SDavid S. Miller 8040a625fd2SDavid S. Miller j = k = 0; 8050a625fd2SDavid S. Miller *x = 0; 8060a625fd2SDavid S. Miller *y = 0; 8070a625fd2SDavid S. Miller for (i = 0; i < 256; i++) 8080a625fd2SDavid S. Miller s[i] = i; 8090a625fd2SDavid S. Miller for (i = 0; i < 256; i++) { 8100a625fd2SDavid S. Miller u8 a = s[i]; 8110a625fd2SDavid S. Miller j = (j + key[k] + a) & 0xff; 8120a625fd2SDavid S. Miller s[i] = s[j]; 8130a625fd2SDavid S. Miller s[j] = a; 8140a625fd2SDavid S. Miller if (++k >= keylen) 8150a625fd2SDavid S. Miller k = 0; 8160a625fd2SDavid S. Miller } 8170a625fd2SDavid S. Miller 8180a625fd2SDavid S. Miller return 0; 8190a625fd2SDavid S. Miller } 8200a625fd2SDavid S. Miller 8210a625fd2SDavid S. Miller static inline int cipher_descriptor_len(int nbytes, unsigned int block_size) 8220a625fd2SDavid S. Miller { 8230a625fd2SDavid S. Miller int this_len = nbytes; 8240a625fd2SDavid S. Miller 8250a625fd2SDavid S. Miller this_len -= (nbytes & (block_size - 1)); 8260a625fd2SDavid S. Miller return this_len > (1 << 16) ? (1 << 16) : this_len; 8270a625fd2SDavid S. Miller } 8280a625fd2SDavid S. Miller 8290a625fd2SDavid S. Miller static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp, 8300a625fd2SDavid S. Miller struct spu_queue *qp, bool encrypt) 8310a625fd2SDavid S. Miller { 8320a625fd2SDavid S. Miller struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 8330a625fd2SDavid S. Miller struct cwq_initial_entry *ent; 8340a625fd2SDavid S. Miller bool in_place; 8350a625fd2SDavid S. Miller int i; 8360a625fd2SDavid S. Miller 8370a625fd2SDavid S. Miller ent = spu_queue_alloc(qp, cp->arr_len); 8380a625fd2SDavid S. Miller if (!ent) { 8390a625fd2SDavid S. Miller pr_info("queue_alloc() of %d fails\n", 8400a625fd2SDavid S. Miller cp->arr_len); 8410a625fd2SDavid S. 
Miller return -EBUSY; 8420a625fd2SDavid S. Miller } 8430a625fd2SDavid S. Miller 8440a625fd2SDavid S. Miller in_place = (cp->dest_paddr == cp->arr[0].src_paddr); 8450a625fd2SDavid S. Miller 8460a625fd2SDavid S. Miller ent->control = control_word_base(cp->arr[0].src_len, 8470a625fd2SDavid S. Miller 0, ctx->enc_type, 0, 0, 8480a625fd2SDavid S. Miller false, true, false, encrypt, 8490a625fd2SDavid S. Miller OPCODE_ENCRYPT | 8500a625fd2SDavid S. Miller (in_place ? OPCODE_INPLACE_BIT : 0)); 8510a625fd2SDavid S. Miller ent->src_addr = cp->arr[0].src_paddr; 8520a625fd2SDavid S. Miller ent->auth_key_addr = 0UL; 8530a625fd2SDavid S. Miller ent->auth_iv_addr = 0UL; 8540a625fd2SDavid S. Miller ent->final_auth_state_addr = 0UL; 8550a625fd2SDavid S. Miller ent->enc_key_addr = __pa(&ctx->key); 8560a625fd2SDavid S. Miller ent->enc_iv_addr = cp->iv_paddr; 8570a625fd2SDavid S. Miller ent->dest_addr = (in_place ? 0UL : cp->dest_paddr); 8580a625fd2SDavid S. Miller 8590a625fd2SDavid S. Miller for (i = 1; i < cp->arr_len; i++) { 8600a625fd2SDavid S. Miller ent = spu_queue_next(qp, ent); 8610a625fd2SDavid S. Miller 8620a625fd2SDavid S. Miller ent->control = cp->arr[i].src_len - 1; 8630a625fd2SDavid S. Miller ent->src_addr = cp->arr[i].src_paddr; 8640a625fd2SDavid S. Miller ent->auth_key_addr = 0UL; 8650a625fd2SDavid S. Miller ent->auth_iv_addr = 0UL; 8660a625fd2SDavid S. Miller ent->final_auth_state_addr = 0UL; 8670a625fd2SDavid S. Miller ent->enc_key_addr = 0UL; 8680a625fd2SDavid S. Miller ent->enc_iv_addr = 0UL; 8690a625fd2SDavid S. Miller ent->dest_addr = 0UL; 8700a625fd2SDavid S. Miller } 8710a625fd2SDavid S. Miller ent->control |= CONTROL_END_OF_BLOCK; 8720a625fd2SDavid S. Miller 8730a625fd2SDavid S. Miller return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0; 8740a625fd2SDavid S. Miller } 8750a625fd2SDavid S. Miller 8760a625fd2SDavid S. Miller static int n2_compute_chunks(struct ablkcipher_request *req) 8770a625fd2SDavid S. Miller { 8780a625fd2SDavid S. 
Miller struct n2_request_context *rctx = ablkcipher_request_ctx(req); 8790a625fd2SDavid S. Miller struct ablkcipher_walk *walk = &rctx->walk; 8800a625fd2SDavid S. Miller struct n2_crypto_chunk *chunk; 8810a625fd2SDavid S. Miller unsigned long dest_prev; 8820a625fd2SDavid S. Miller unsigned int tot_len; 8830a625fd2SDavid S. Miller bool prev_in_place; 8840a625fd2SDavid S. Miller int err, nbytes; 8850a625fd2SDavid S. Miller 8860a625fd2SDavid S. Miller ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes); 8870a625fd2SDavid S. Miller err = ablkcipher_walk_phys(req, walk); 8880a625fd2SDavid S. Miller if (err) 8890a625fd2SDavid S. Miller return err; 8900a625fd2SDavid S. Miller 8910a625fd2SDavid S. Miller INIT_LIST_HEAD(&rctx->chunk_list); 8920a625fd2SDavid S. Miller 8930a625fd2SDavid S. Miller chunk = &rctx->chunk; 8940a625fd2SDavid S. Miller INIT_LIST_HEAD(&chunk->entry); 8950a625fd2SDavid S. Miller 8960a625fd2SDavid S. Miller chunk->iv_paddr = 0UL; 8970a625fd2SDavid S. Miller chunk->arr_len = 0; 8980a625fd2SDavid S. Miller chunk->dest_paddr = 0UL; 8990a625fd2SDavid S. Miller 9000a625fd2SDavid S. Miller prev_in_place = false; 9010a625fd2SDavid S. Miller dest_prev = ~0UL; 9020a625fd2SDavid S. Miller tot_len = 0; 9030a625fd2SDavid S. Miller 9040a625fd2SDavid S. Miller while ((nbytes = walk->nbytes) != 0) { 9050a625fd2SDavid S. Miller unsigned long dest_paddr, src_paddr; 9060a625fd2SDavid S. Miller bool in_place; 9070a625fd2SDavid S. Miller int this_len; 9080a625fd2SDavid S. Miller 9090a625fd2SDavid S. Miller src_paddr = (page_to_phys(walk->src.page) + 9100a625fd2SDavid S. Miller walk->src.offset); 9110a625fd2SDavid S. Miller dest_paddr = (page_to_phys(walk->dst.page) + 9120a625fd2SDavid S. Miller walk->dst.offset); 9130a625fd2SDavid S. Miller in_place = (src_paddr == dest_paddr); 9140a625fd2SDavid S. Miller this_len = cipher_descriptor_len(nbytes, walk->blocksize); 9150a625fd2SDavid S. Miller 9160a625fd2SDavid S. 
Miller if (chunk->arr_len != 0) { 9170a625fd2SDavid S. Miller if (in_place != prev_in_place || 9180a625fd2SDavid S. Miller (!prev_in_place && 9190a625fd2SDavid S. Miller dest_paddr != dest_prev) || 9200a625fd2SDavid S. Miller chunk->arr_len == N2_CHUNK_ARR_LEN || 9210a625fd2SDavid S. Miller tot_len + this_len > (1 << 16)) { 9220a625fd2SDavid S. Miller chunk->dest_final = dest_prev; 9230a625fd2SDavid S. Miller list_add_tail(&chunk->entry, 9240a625fd2SDavid S. Miller &rctx->chunk_list); 9250a625fd2SDavid S. Miller chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC); 9260a625fd2SDavid S. Miller if (!chunk) { 9270a625fd2SDavid S. Miller err = -ENOMEM; 9280a625fd2SDavid S. Miller break; 9290a625fd2SDavid S. Miller } 9300a625fd2SDavid S. Miller INIT_LIST_HEAD(&chunk->entry); 9310a625fd2SDavid S. Miller } 9320a625fd2SDavid S. Miller } 9330a625fd2SDavid S. Miller if (chunk->arr_len == 0) { 9340a625fd2SDavid S. Miller chunk->dest_paddr = dest_paddr; 9350a625fd2SDavid S. Miller tot_len = 0; 9360a625fd2SDavid S. Miller } 9370a625fd2SDavid S. Miller chunk->arr[chunk->arr_len].src_paddr = src_paddr; 9380a625fd2SDavid S. Miller chunk->arr[chunk->arr_len].src_len = this_len; 9390a625fd2SDavid S. Miller chunk->arr_len++; 9400a625fd2SDavid S. Miller 9410a625fd2SDavid S. Miller dest_prev = dest_paddr + this_len; 9420a625fd2SDavid S. Miller prev_in_place = in_place; 9430a625fd2SDavid S. Miller tot_len += this_len; 9440a625fd2SDavid S. Miller 9450a625fd2SDavid S. Miller err = ablkcipher_walk_done(req, walk, nbytes - this_len); 9460a625fd2SDavid S. Miller if (err) 9470a625fd2SDavid S. Miller break; 9480a625fd2SDavid S. Miller } 9490a625fd2SDavid S. Miller if (!err && chunk->arr_len != 0) { 9500a625fd2SDavid S. Miller chunk->dest_final = dest_prev; 9510a625fd2SDavid S. Miller list_add_tail(&chunk->entry, &rctx->chunk_list); 9520a625fd2SDavid S. Miller } 9530a625fd2SDavid S. Miller 9540a625fd2SDavid S. Miller return err; 9550a625fd2SDavid S. Miller } 9560a625fd2SDavid S. 
Miller 9570a625fd2SDavid S. Miller static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv) 9580a625fd2SDavid S. Miller { 9590a625fd2SDavid S. Miller struct n2_request_context *rctx = ablkcipher_request_ctx(req); 9600a625fd2SDavid S. Miller struct n2_crypto_chunk *c, *tmp; 9610a625fd2SDavid S. Miller 9620a625fd2SDavid S. Miller if (final_iv) 9630a625fd2SDavid S. Miller memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize); 9640a625fd2SDavid S. Miller 9650a625fd2SDavid S. Miller ablkcipher_walk_complete(&rctx->walk); 9660a625fd2SDavid S. Miller list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { 9670a625fd2SDavid S. Miller list_del(&c->entry); 9680a625fd2SDavid S. Miller if (unlikely(c != &rctx->chunk)) 9690a625fd2SDavid S. Miller kfree(c); 9700a625fd2SDavid S. Miller } 9710a625fd2SDavid S. Miller 9720a625fd2SDavid S. Miller } 9730a625fd2SDavid S. Miller 9740a625fd2SDavid S. Miller static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt) 9750a625fd2SDavid S. Miller { 9760a625fd2SDavid S. Miller struct n2_request_context *rctx = ablkcipher_request_ctx(req); 9770a625fd2SDavid S. Miller struct crypto_tfm *tfm = req->base.tfm; 9780a625fd2SDavid S. Miller int err = n2_compute_chunks(req); 9790a625fd2SDavid S. Miller struct n2_crypto_chunk *c, *tmp; 9800a625fd2SDavid S. Miller unsigned long flags, hv_ret; 9810a625fd2SDavid S. Miller struct spu_queue *qp; 9820a625fd2SDavid S. Miller 9830a625fd2SDavid S. Miller if (err) 9840a625fd2SDavid S. Miller return err; 9850a625fd2SDavid S. Miller 9860a625fd2SDavid S. Miller qp = cpu_to_cwq[get_cpu()]; 9870a625fd2SDavid S. Miller err = -ENODEV; 9880a625fd2SDavid S. Miller if (!qp) 9890a625fd2SDavid S. Miller goto out; 9900a625fd2SDavid S. Miller 9910a625fd2SDavid S. Miller spin_lock_irqsave(&qp->lock, flags); 9920a625fd2SDavid S. Miller 9930a625fd2SDavid S. Miller list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { 9940a625fd2SDavid S. 
Miller err = __n2_crypt_chunk(tfm, c, qp, encrypt); 9950a625fd2SDavid S. Miller if (err) 9960a625fd2SDavid S. Miller break; 9970a625fd2SDavid S. Miller list_del(&c->entry); 9980a625fd2SDavid S. Miller if (unlikely(c != &rctx->chunk)) 9990a625fd2SDavid S. Miller kfree(c); 10000a625fd2SDavid S. Miller } 10010a625fd2SDavid S. Miller if (!err) { 10020a625fd2SDavid S. Miller hv_ret = wait_for_tail(qp); 10030a625fd2SDavid S. Miller if (hv_ret != HV_EOK) 10040a625fd2SDavid S. Miller err = -EINVAL; 10050a625fd2SDavid S. Miller } 10060a625fd2SDavid S. Miller 10070a625fd2SDavid S. Miller spin_unlock_irqrestore(&qp->lock, flags); 10080a625fd2SDavid S. Miller 1009e27303b2SThomas Meyer out: 10100a625fd2SDavid S. Miller put_cpu(); 10110a625fd2SDavid S. Miller 10120a625fd2SDavid S. Miller n2_chunk_complete(req, NULL); 10130a625fd2SDavid S. Miller return err; 10140a625fd2SDavid S. Miller } 10150a625fd2SDavid S. Miller 10160a625fd2SDavid S. Miller static int n2_encrypt_ecb(struct ablkcipher_request *req) 10170a625fd2SDavid S. Miller { 10180a625fd2SDavid S. Miller return n2_do_ecb(req, true); 10190a625fd2SDavid S. Miller } 10200a625fd2SDavid S. Miller 10210a625fd2SDavid S. Miller static int n2_decrypt_ecb(struct ablkcipher_request *req) 10220a625fd2SDavid S. Miller { 10230a625fd2SDavid S. Miller return n2_do_ecb(req, false); 10240a625fd2SDavid S. Miller } 10250a625fd2SDavid S. Miller 10260a625fd2SDavid S. Miller static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt) 10270a625fd2SDavid S. Miller { 10280a625fd2SDavid S. Miller struct n2_request_context *rctx = ablkcipher_request_ctx(req); 10290a625fd2SDavid S. Miller struct crypto_tfm *tfm = req->base.tfm; 10300a625fd2SDavid S. Miller unsigned long flags, hv_ret, iv_paddr; 10310a625fd2SDavid S. Miller int err = n2_compute_chunks(req); 10320a625fd2SDavid S. Miller struct n2_crypto_chunk *c, *tmp; 10330a625fd2SDavid S. Miller struct spu_queue *qp; 10340a625fd2SDavid S. Miller void *final_iv_addr; 10350a625fd2SDavid S. 
Miller 10360a625fd2SDavid S. Miller final_iv_addr = NULL; 10370a625fd2SDavid S. Miller 10380a625fd2SDavid S. Miller if (err) 10390a625fd2SDavid S. Miller return err; 10400a625fd2SDavid S. Miller 10410a625fd2SDavid S. Miller qp = cpu_to_cwq[get_cpu()]; 10420a625fd2SDavid S. Miller err = -ENODEV; 10430a625fd2SDavid S. Miller if (!qp) 10440a625fd2SDavid S. Miller goto out; 10450a625fd2SDavid S. Miller 10460a625fd2SDavid S. Miller spin_lock_irqsave(&qp->lock, flags); 10470a625fd2SDavid S. Miller 10480a625fd2SDavid S. Miller if (encrypt) { 10490a625fd2SDavid S. Miller iv_paddr = __pa(rctx->walk.iv); 10500a625fd2SDavid S. Miller list_for_each_entry_safe(c, tmp, &rctx->chunk_list, 10510a625fd2SDavid S. Miller entry) { 10520a625fd2SDavid S. Miller c->iv_paddr = iv_paddr; 10530a625fd2SDavid S. Miller err = __n2_crypt_chunk(tfm, c, qp, true); 10540a625fd2SDavid S. Miller if (err) 10550a625fd2SDavid S. Miller break; 10560a625fd2SDavid S. Miller iv_paddr = c->dest_final - rctx->walk.blocksize; 10570a625fd2SDavid S. Miller list_del(&c->entry); 10580a625fd2SDavid S. Miller if (unlikely(c != &rctx->chunk)) 10590a625fd2SDavid S. Miller kfree(c); 10600a625fd2SDavid S. Miller } 10610a625fd2SDavid S. Miller final_iv_addr = __va(iv_paddr); 10620a625fd2SDavid S. Miller } else { 10630a625fd2SDavid S. Miller list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, 10640a625fd2SDavid S. Miller entry) { 10650a625fd2SDavid S. Miller if (c == &rctx->chunk) { 10660a625fd2SDavid S. Miller iv_paddr = __pa(rctx->walk.iv); 10670a625fd2SDavid S. Miller } else { 10680a625fd2SDavid S. Miller iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr + 10690a625fd2SDavid S. Miller tmp->arr[tmp->arr_len-1].src_len - 10700a625fd2SDavid S. Miller rctx->walk.blocksize); 10710a625fd2SDavid S. Miller } 10720a625fd2SDavid S. Miller if (!final_iv_addr) { 10730a625fd2SDavid S. Miller unsigned long pa; 10740a625fd2SDavid S. Miller 10750a625fd2SDavid S. Miller pa = (c->arr[c->arr_len-1].src_paddr + 10760a625fd2SDavid S. 
Miller c->arr[c->arr_len-1].src_len - 10770a625fd2SDavid S. Miller rctx->walk.blocksize); 10780a625fd2SDavid S. Miller final_iv_addr = rctx->temp_iv; 10790a625fd2SDavid S. Miller memcpy(rctx->temp_iv, __va(pa), 10800a625fd2SDavid S. Miller rctx->walk.blocksize); 10810a625fd2SDavid S. Miller } 10820a625fd2SDavid S. Miller c->iv_paddr = iv_paddr; 10830a625fd2SDavid S. Miller err = __n2_crypt_chunk(tfm, c, qp, false); 10840a625fd2SDavid S. Miller if (err) 10850a625fd2SDavid S. Miller break; 10860a625fd2SDavid S. Miller list_del(&c->entry); 10870a625fd2SDavid S. Miller if (unlikely(c != &rctx->chunk)) 10880a625fd2SDavid S. Miller kfree(c); 10890a625fd2SDavid S. Miller } 10900a625fd2SDavid S. Miller } 10910a625fd2SDavid S. Miller if (!err) { 10920a625fd2SDavid S. Miller hv_ret = wait_for_tail(qp); 10930a625fd2SDavid S. Miller if (hv_ret != HV_EOK) 10940a625fd2SDavid S. Miller err = -EINVAL; 10950a625fd2SDavid S. Miller } 10960a625fd2SDavid S. Miller 10970a625fd2SDavid S. Miller spin_unlock_irqrestore(&qp->lock, flags); 10980a625fd2SDavid S. Miller 1099e27303b2SThomas Meyer out: 11000a625fd2SDavid S. Miller put_cpu(); 11010a625fd2SDavid S. Miller 11020a625fd2SDavid S. Miller n2_chunk_complete(req, err ? NULL : final_iv_addr); 11030a625fd2SDavid S. Miller return err; 11040a625fd2SDavid S. Miller } 11050a625fd2SDavid S. Miller 11060a625fd2SDavid S. Miller static int n2_encrypt_chaining(struct ablkcipher_request *req) 11070a625fd2SDavid S. Miller { 11080a625fd2SDavid S. Miller return n2_do_chaining(req, true); 11090a625fd2SDavid S. Miller } 11100a625fd2SDavid S. Miller 11110a625fd2SDavid S. Miller static int n2_decrypt_chaining(struct ablkcipher_request *req) 11120a625fd2SDavid S. Miller { 11130a625fd2SDavid S. Miller return n2_do_chaining(req, false); 11140a625fd2SDavid S. Miller } 11150a625fd2SDavid S. Miller 11160a625fd2SDavid S. Miller struct n2_cipher_tmpl { 11170a625fd2SDavid S. Miller const char *name; 11180a625fd2SDavid S. 
Miller const char *drv_name; 11190a625fd2SDavid S. Miller u8 block_size; 11200a625fd2SDavid S. Miller u8 enc_type; 11210a625fd2SDavid S. Miller struct ablkcipher_alg ablkcipher; 11220a625fd2SDavid S. Miller }; 11230a625fd2SDavid S. Miller 11240a625fd2SDavid S. Miller static const struct n2_cipher_tmpl cipher_tmpls[] = { 11250a625fd2SDavid S. Miller /* ARC4: only ECB is supported (chaining bits ignored) */ 11260a625fd2SDavid S. Miller { .name = "ecb(arc4)", 11270a625fd2SDavid S. Miller .drv_name = "ecb-arc4", 11280a625fd2SDavid S. Miller .block_size = 1, 11290a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_RC4_STREAM | 11300a625fd2SDavid S. Miller ENC_TYPE_CHAINING_ECB), 11310a625fd2SDavid S. Miller .ablkcipher = { 11320a625fd2SDavid S. Miller .min_keysize = 1, 11330a625fd2SDavid S. Miller .max_keysize = 256, 11340a625fd2SDavid S. Miller .setkey = n2_arc4_setkey, 11350a625fd2SDavid S. Miller .encrypt = n2_encrypt_ecb, 11360a625fd2SDavid S. Miller .decrypt = n2_decrypt_ecb, 11370a625fd2SDavid S. Miller }, 11380a625fd2SDavid S. Miller }, 11390a625fd2SDavid S. Miller 11400a625fd2SDavid S. Miller /* DES: ECB CBC and CFB are supported */ 11410a625fd2SDavid S. Miller { .name = "ecb(des)", 11420a625fd2SDavid S. Miller .drv_name = "ecb-des", 11430a625fd2SDavid S. Miller .block_size = DES_BLOCK_SIZE, 11440a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_DES | 11450a625fd2SDavid S. Miller ENC_TYPE_CHAINING_ECB), 11460a625fd2SDavid S. Miller .ablkcipher = { 11470a625fd2SDavid S. Miller .min_keysize = DES_KEY_SIZE, 11480a625fd2SDavid S. Miller .max_keysize = DES_KEY_SIZE, 11490a625fd2SDavid S. Miller .setkey = n2_des_setkey, 11500a625fd2SDavid S. Miller .encrypt = n2_encrypt_ecb, 11510a625fd2SDavid S. Miller .decrypt = n2_decrypt_ecb, 11520a625fd2SDavid S. Miller }, 11530a625fd2SDavid S. Miller }, 11540a625fd2SDavid S. Miller { .name = "cbc(des)", 11550a625fd2SDavid S. Miller .drv_name = "cbc-des", 11560a625fd2SDavid S. 
Miller .block_size = DES_BLOCK_SIZE, 11570a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_DES | 11580a625fd2SDavid S. Miller ENC_TYPE_CHAINING_CBC), 11590a625fd2SDavid S. Miller .ablkcipher = { 11600a625fd2SDavid S. Miller .ivsize = DES_BLOCK_SIZE, 11610a625fd2SDavid S. Miller .min_keysize = DES_KEY_SIZE, 11620a625fd2SDavid S. Miller .max_keysize = DES_KEY_SIZE, 11630a625fd2SDavid S. Miller .setkey = n2_des_setkey, 11640a625fd2SDavid S. Miller .encrypt = n2_encrypt_chaining, 11650a625fd2SDavid S. Miller .decrypt = n2_decrypt_chaining, 11660a625fd2SDavid S. Miller }, 11670a625fd2SDavid S. Miller }, 11680a625fd2SDavid S. Miller { .name = "cfb(des)", 11690a625fd2SDavid S. Miller .drv_name = "cfb-des", 11700a625fd2SDavid S. Miller .block_size = DES_BLOCK_SIZE, 11710a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_DES | 11720a625fd2SDavid S. Miller ENC_TYPE_CHAINING_CFB), 11730a625fd2SDavid S. Miller .ablkcipher = { 11740a625fd2SDavid S. Miller .min_keysize = DES_KEY_SIZE, 11750a625fd2SDavid S. Miller .max_keysize = DES_KEY_SIZE, 11760a625fd2SDavid S. Miller .setkey = n2_des_setkey, 11770a625fd2SDavid S. Miller .encrypt = n2_encrypt_chaining, 11780a625fd2SDavid S. Miller .decrypt = n2_decrypt_chaining, 11790a625fd2SDavid S. Miller }, 11800a625fd2SDavid S. Miller }, 11810a625fd2SDavid S. Miller 11820a625fd2SDavid S. Miller /* 3DES: ECB CBC and CFB are supported */ 11830a625fd2SDavid S. Miller { .name = "ecb(des3_ede)", 11840a625fd2SDavid S. Miller .drv_name = "ecb-3des", 11850a625fd2SDavid S. Miller .block_size = DES_BLOCK_SIZE, 11860a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_3DES | 11870a625fd2SDavid S. Miller ENC_TYPE_CHAINING_ECB), 11880a625fd2SDavid S. Miller .ablkcipher = { 11890a625fd2SDavid S. Miller .min_keysize = 3 * DES_KEY_SIZE, 11900a625fd2SDavid S. Miller .max_keysize = 3 * DES_KEY_SIZE, 11910a625fd2SDavid S. Miller .setkey = n2_3des_setkey, 11920a625fd2SDavid S. Miller .encrypt = n2_encrypt_ecb, 11930a625fd2SDavid S. 
Miller .decrypt = n2_decrypt_ecb, 11940a625fd2SDavid S. Miller }, 11950a625fd2SDavid S. Miller }, 11960a625fd2SDavid S. Miller { .name = "cbc(des3_ede)", 11970a625fd2SDavid S. Miller .drv_name = "cbc-3des", 11980a625fd2SDavid S. Miller .block_size = DES_BLOCK_SIZE, 11990a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_3DES | 12000a625fd2SDavid S. Miller ENC_TYPE_CHAINING_CBC), 12010a625fd2SDavid S. Miller .ablkcipher = { 12020a625fd2SDavid S. Miller .ivsize = DES_BLOCK_SIZE, 12030a625fd2SDavid S. Miller .min_keysize = 3 * DES_KEY_SIZE, 12040a625fd2SDavid S. Miller .max_keysize = 3 * DES_KEY_SIZE, 12050a625fd2SDavid S. Miller .setkey = n2_3des_setkey, 12060a625fd2SDavid S. Miller .encrypt = n2_encrypt_chaining, 12070a625fd2SDavid S. Miller .decrypt = n2_decrypt_chaining, 12080a625fd2SDavid S. Miller }, 12090a625fd2SDavid S. Miller }, 12100a625fd2SDavid S. Miller { .name = "cfb(des3_ede)", 12110a625fd2SDavid S. Miller .drv_name = "cfb-3des", 12120a625fd2SDavid S. Miller .block_size = DES_BLOCK_SIZE, 12130a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_3DES | 12140a625fd2SDavid S. Miller ENC_TYPE_CHAINING_CFB), 12150a625fd2SDavid S. Miller .ablkcipher = { 12160a625fd2SDavid S. Miller .min_keysize = 3 * DES_KEY_SIZE, 12170a625fd2SDavid S. Miller .max_keysize = 3 * DES_KEY_SIZE, 12180a625fd2SDavid S. Miller .setkey = n2_3des_setkey, 12190a625fd2SDavid S. Miller .encrypt = n2_encrypt_chaining, 12200a625fd2SDavid S. Miller .decrypt = n2_decrypt_chaining, 12210a625fd2SDavid S. Miller }, 12220a625fd2SDavid S. Miller }, 12230a625fd2SDavid S. Miller /* AES: ECB CBC and CTR are supported */ 12240a625fd2SDavid S. Miller { .name = "ecb(aes)", 12250a625fd2SDavid S. Miller .drv_name = "ecb-aes", 12260a625fd2SDavid S. Miller .block_size = AES_BLOCK_SIZE, 12270a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_AES128 | 12280a625fd2SDavid S. Miller ENC_TYPE_CHAINING_ECB), 12290a625fd2SDavid S. Miller .ablkcipher = { 12300a625fd2SDavid S. 
Miller .min_keysize = AES_MIN_KEY_SIZE, 12310a625fd2SDavid S. Miller .max_keysize = AES_MAX_KEY_SIZE, 12320a625fd2SDavid S. Miller .setkey = n2_aes_setkey, 12330a625fd2SDavid S. Miller .encrypt = n2_encrypt_ecb, 12340a625fd2SDavid S. Miller .decrypt = n2_decrypt_ecb, 12350a625fd2SDavid S. Miller }, 12360a625fd2SDavid S. Miller }, 12370a625fd2SDavid S. Miller { .name = "cbc(aes)", 12380a625fd2SDavid S. Miller .drv_name = "cbc-aes", 12390a625fd2SDavid S. Miller .block_size = AES_BLOCK_SIZE, 12400a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_AES128 | 12410a625fd2SDavid S. Miller ENC_TYPE_CHAINING_CBC), 12420a625fd2SDavid S. Miller .ablkcipher = { 12430a625fd2SDavid S. Miller .ivsize = AES_BLOCK_SIZE, 12440a625fd2SDavid S. Miller .min_keysize = AES_MIN_KEY_SIZE, 12450a625fd2SDavid S. Miller .max_keysize = AES_MAX_KEY_SIZE, 12460a625fd2SDavid S. Miller .setkey = n2_aes_setkey, 12470a625fd2SDavid S. Miller .encrypt = n2_encrypt_chaining, 12480a625fd2SDavid S. Miller .decrypt = n2_decrypt_chaining, 12490a625fd2SDavid S. Miller }, 12500a625fd2SDavid S. Miller }, 12510a625fd2SDavid S. Miller { .name = "ctr(aes)", 12520a625fd2SDavid S. Miller .drv_name = "ctr-aes", 12530a625fd2SDavid S. Miller .block_size = AES_BLOCK_SIZE, 12540a625fd2SDavid S. Miller .enc_type = (ENC_TYPE_ALG_AES128 | 12550a625fd2SDavid S. Miller ENC_TYPE_CHAINING_COUNTER), 12560a625fd2SDavid S. Miller .ablkcipher = { 12570a625fd2SDavid S. Miller .ivsize = AES_BLOCK_SIZE, 12580a625fd2SDavid S. Miller .min_keysize = AES_MIN_KEY_SIZE, 12590a625fd2SDavid S. Miller .max_keysize = AES_MAX_KEY_SIZE, 12600a625fd2SDavid S. Miller .setkey = n2_aes_setkey, 12610a625fd2SDavid S. Miller .encrypt = n2_encrypt_chaining, 12620a625fd2SDavid S. Miller .decrypt = n2_encrypt_chaining, 12630a625fd2SDavid S. Miller }, 12640a625fd2SDavid S. Miller }, 12650a625fd2SDavid S. Miller 12660a625fd2SDavid S. Miller }; 12670a625fd2SDavid S. Miller #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls) 12680a625fd2SDavid S. 
Miller 12690a625fd2SDavid S. Miller static LIST_HEAD(cipher_algs); 12700a625fd2SDavid S. Miller 12710a625fd2SDavid S. Miller struct n2_hash_tmpl { 12720a625fd2SDavid S. Miller const char *name; 12733a2c0346SDavid S. Miller const char *hash_zero; 12743a2c0346SDavid S. Miller const u32 *hash_init; 12753a2c0346SDavid S. Miller u8 hw_op_hashsz; 12760a625fd2SDavid S. Miller u8 digest_size; 12770a625fd2SDavid S. Miller u8 block_size; 12783a2c0346SDavid S. Miller u8 auth_type; 1279dc4ccfd1SDavid S. Miller u8 hmac_type; 12800a625fd2SDavid S. Miller }; 12813a2c0346SDavid S. Miller 12823a2c0346SDavid S. Miller static const char md5_zero[MD5_DIGEST_SIZE] = { 12833a2c0346SDavid S. Miller 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, 12843a2c0346SDavid S. Miller 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, 12853a2c0346SDavid S. Miller }; 12863a2c0346SDavid S. Miller static const u32 md5_init[MD5_HASH_WORDS] = { 12873a2c0346SDavid S. Miller cpu_to_le32(0x67452301), 12883a2c0346SDavid S. Miller cpu_to_le32(0xefcdab89), 12893a2c0346SDavid S. Miller cpu_to_le32(0x98badcfe), 12903a2c0346SDavid S. Miller cpu_to_le32(0x10325476), 12913a2c0346SDavid S. Miller }; 12923a2c0346SDavid S. Miller static const char sha1_zero[SHA1_DIGEST_SIZE] = { 12933a2c0346SDavid S. Miller 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 12943a2c0346SDavid S. Miller 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 12953a2c0346SDavid S. Miller 0x07, 0x09 12963a2c0346SDavid S. Miller }; 12973a2c0346SDavid S. Miller static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { 12983a2c0346SDavid S. Miller SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 12993a2c0346SDavid S. Miller }; 13003a2c0346SDavid S. Miller static const char sha256_zero[SHA256_DIGEST_SIZE] = { 13013a2c0346SDavid S. Miller 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 13023a2c0346SDavid S. Miller 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 13033a2c0346SDavid S. 
Miller 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 13043a2c0346SDavid S. Miller 0x1b, 0x78, 0x52, 0xb8, 0x55 13053a2c0346SDavid S. Miller }; 13063a2c0346SDavid S. Miller static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { 13073a2c0346SDavid S. Miller SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, 13083a2c0346SDavid S. Miller SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, 13093a2c0346SDavid S. Miller }; 13103a2c0346SDavid S. Miller static const char sha224_zero[SHA224_DIGEST_SIZE] = { 13113a2c0346SDavid S. Miller 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, 13123a2c0346SDavid S. Miller 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, 13133a2c0346SDavid S. Miller 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, 13143a2c0346SDavid S. Miller 0x2f 13153a2c0346SDavid S. Miller }; 13163a2c0346SDavid S. Miller static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { 13173a2c0346SDavid S. Miller SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, 13183a2c0346SDavid S. Miller SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, 13193a2c0346SDavid S. Miller }; 13203a2c0346SDavid S. Miller 13210a625fd2SDavid S. Miller static const struct n2_hash_tmpl hash_tmpls[] = { 13220a625fd2SDavid S. Miller { .name = "md5", 13233a2c0346SDavid S. Miller .hash_zero = md5_zero, 13243a2c0346SDavid S. Miller .hash_init = md5_init, 13253a2c0346SDavid S. Miller .auth_type = AUTH_TYPE_MD5, 1326dc4ccfd1SDavid S. Miller .hmac_type = AUTH_TYPE_HMAC_MD5, 13273a2c0346SDavid S. Miller .hw_op_hashsz = MD5_DIGEST_SIZE, 13280a625fd2SDavid S. Miller .digest_size = MD5_DIGEST_SIZE, 13290a625fd2SDavid S. Miller .block_size = MD5_HMAC_BLOCK_SIZE }, 13300a625fd2SDavid S. Miller { .name = "sha1", 13313a2c0346SDavid S. Miller .hash_zero = sha1_zero, 13323a2c0346SDavid S. Miller .hash_init = sha1_init, 13333a2c0346SDavid S. Miller .auth_type = AUTH_TYPE_SHA1, 1334dc4ccfd1SDavid S. Miller .hmac_type = AUTH_TYPE_HMAC_SHA1, 13353a2c0346SDavid S. 
Miller .hw_op_hashsz = SHA1_DIGEST_SIZE, 13360a625fd2SDavid S. Miller .digest_size = SHA1_DIGEST_SIZE, 13370a625fd2SDavid S. Miller .block_size = SHA1_BLOCK_SIZE }, 13380a625fd2SDavid S. Miller { .name = "sha256", 13393a2c0346SDavid S. Miller .hash_zero = sha256_zero, 13403a2c0346SDavid S. Miller .hash_init = sha256_init, 13413a2c0346SDavid S. Miller .auth_type = AUTH_TYPE_SHA256, 1342dc4ccfd1SDavid S. Miller .hmac_type = AUTH_TYPE_HMAC_SHA256, 13433a2c0346SDavid S. Miller .hw_op_hashsz = SHA256_DIGEST_SIZE, 13440a625fd2SDavid S. Miller .digest_size = SHA256_DIGEST_SIZE, 13450a625fd2SDavid S. Miller .block_size = SHA256_BLOCK_SIZE }, 13460a625fd2SDavid S. Miller { .name = "sha224", 13473a2c0346SDavid S. Miller .hash_zero = sha224_zero, 13483a2c0346SDavid S. Miller .hash_init = sha224_init, 13493a2c0346SDavid S. Miller .auth_type = AUTH_TYPE_SHA256, 1350dc4ccfd1SDavid S. Miller .hmac_type = AUTH_TYPE_RESERVED, 13513a2c0346SDavid S. Miller .hw_op_hashsz = SHA256_DIGEST_SIZE, 13520a625fd2SDavid S. Miller .digest_size = SHA224_DIGEST_SIZE, 13530a625fd2SDavid S. Miller .block_size = SHA224_BLOCK_SIZE }, 13540a625fd2SDavid S. Miller }; 13550a625fd2SDavid S. Miller #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) 13560a625fd2SDavid S. Miller 13570a625fd2SDavid S. Miller static LIST_HEAD(ahash_algs); 1358dc4ccfd1SDavid S. Miller static LIST_HEAD(hmac_algs); 13590a625fd2SDavid S. Miller 13600a625fd2SDavid S. Miller static int algs_registered; 13610a625fd2SDavid S. Miller 13620a625fd2SDavid S. Miller static void __n2_unregister_algs(void) 13630a625fd2SDavid S. Miller { 13640a625fd2SDavid S. Miller struct n2_cipher_alg *cipher, *cipher_tmp; 13650a625fd2SDavid S. Miller struct n2_ahash_alg *alg, *alg_tmp; 1366dc4ccfd1SDavid S. Miller struct n2_hmac_alg *hmac, *hmac_tmp; 13670a625fd2SDavid S. Miller 13680a625fd2SDavid S. Miller list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { 13690a625fd2SDavid S. Miller crypto_unregister_alg(&cipher->alg); 13700a625fd2SDavid S. 
Miller list_del(&cipher->entry); 13710a625fd2SDavid S. Miller kfree(cipher); 13720a625fd2SDavid S. Miller } 1373dc4ccfd1SDavid S. Miller list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { 1374dc4ccfd1SDavid S. Miller crypto_unregister_ahash(&hmac->derived.alg); 1375dc4ccfd1SDavid S. Miller list_del(&hmac->derived.entry); 1376dc4ccfd1SDavid S. Miller kfree(hmac); 1377dc4ccfd1SDavid S. Miller } 13780a625fd2SDavid S. Miller list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { 13790a625fd2SDavid S. Miller crypto_unregister_ahash(&alg->alg); 13800a625fd2SDavid S. Miller list_del(&alg->entry); 13810a625fd2SDavid S. Miller kfree(alg); 13820a625fd2SDavid S. Miller } 13830a625fd2SDavid S. Miller } 13840a625fd2SDavid S. Miller 13850a625fd2SDavid S. Miller static int n2_cipher_cra_init(struct crypto_tfm *tfm) 13860a625fd2SDavid S. Miller { 13870a625fd2SDavid S. Miller tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); 13880a625fd2SDavid S. Miller return 0; 13890a625fd2SDavid S. Miller } 13900a625fd2SDavid S. Miller 13910a625fd2SDavid S. Miller static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) 13920a625fd2SDavid S. Miller { 13930a625fd2SDavid S. Miller struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 13940a625fd2SDavid S. Miller struct crypto_alg *alg; 13950a625fd2SDavid S. Miller int err; 13960a625fd2SDavid S. Miller 13970a625fd2SDavid S. Miller if (!p) 13980a625fd2SDavid S. Miller return -ENOMEM; 13990a625fd2SDavid S. Miller 14000a625fd2SDavid S. Miller alg = &p->alg; 14010a625fd2SDavid S. Miller 14020a625fd2SDavid S. Miller snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 14030a625fd2SDavid S. Miller snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); 14040a625fd2SDavid S. Miller alg->cra_priority = N2_CRA_PRIORITY; 14050a625fd2SDavid S. Miller alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 14060a625fd2SDavid S. 
Miller alg->cra_blocksize = tmpl->block_size; 14070a625fd2SDavid S. Miller p->enc_type = tmpl->enc_type; 14080a625fd2SDavid S. Miller alg->cra_ctxsize = sizeof(struct n2_cipher_context); 14090a625fd2SDavid S. Miller alg->cra_type = &crypto_ablkcipher_type; 14100a625fd2SDavid S. Miller alg->cra_u.ablkcipher = tmpl->ablkcipher; 14110a625fd2SDavid S. Miller alg->cra_init = n2_cipher_cra_init; 14120a625fd2SDavid S. Miller alg->cra_module = THIS_MODULE; 14130a625fd2SDavid S. Miller 14140a625fd2SDavid S. Miller list_add(&p->entry, &cipher_algs); 14150a625fd2SDavid S. Miller err = crypto_register_alg(alg); 14160a625fd2SDavid S. Miller if (err) { 141738511108SDavid S. Miller pr_err("%s alg registration failed\n", alg->cra_name); 14180a625fd2SDavid S. Miller list_del(&p->entry); 14190a625fd2SDavid S. Miller kfree(p); 142038511108SDavid S. Miller } else { 142138511108SDavid S. Miller pr_info("%s alg registered\n", alg->cra_name); 14220a625fd2SDavid S. Miller } 14230a625fd2SDavid S. Miller return err; 14240a625fd2SDavid S. Miller } 14250a625fd2SDavid S. Miller 1426dc4ccfd1SDavid S. Miller static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) 1427dc4ccfd1SDavid S. Miller { 1428dc4ccfd1SDavid S. Miller struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 1429dc4ccfd1SDavid S. Miller struct ahash_alg *ahash; 1430dc4ccfd1SDavid S. Miller struct crypto_alg *base; 1431dc4ccfd1SDavid S. Miller int err; 1432dc4ccfd1SDavid S. Miller 1433dc4ccfd1SDavid S. Miller if (!p) 1434dc4ccfd1SDavid S. Miller return -ENOMEM; 1435dc4ccfd1SDavid S. Miller 1436dc4ccfd1SDavid S. Miller p->child_alg = n2ahash->alg.halg.base.cra_name; 1437dc4ccfd1SDavid S. Miller memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); 1438dc4ccfd1SDavid S. Miller INIT_LIST_HEAD(&p->derived.entry); 1439dc4ccfd1SDavid S. Miller 1440dc4ccfd1SDavid S. Miller ahash = &p->derived.alg; 1441dc4ccfd1SDavid S. Miller ahash->digest = n2_hmac_async_digest; 1442dc4ccfd1SDavid S. 
Miller ahash->setkey = n2_hmac_async_setkey; 1443dc4ccfd1SDavid S. Miller 1444dc4ccfd1SDavid S. Miller base = &ahash->halg.base; 1445dc4ccfd1SDavid S. Miller snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg); 1446dc4ccfd1SDavid S. Miller snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg); 1447dc4ccfd1SDavid S. Miller 1448dc4ccfd1SDavid S. Miller base->cra_ctxsize = sizeof(struct n2_hmac_ctx); 1449dc4ccfd1SDavid S. Miller base->cra_init = n2_hmac_cra_init; 1450dc4ccfd1SDavid S. Miller base->cra_exit = n2_hmac_cra_exit; 1451dc4ccfd1SDavid S. Miller 1452dc4ccfd1SDavid S. Miller list_add(&p->derived.entry, &hmac_algs); 1453dc4ccfd1SDavid S. Miller err = crypto_register_ahash(ahash); 1454dc4ccfd1SDavid S. Miller if (err) { 1455dc4ccfd1SDavid S. Miller pr_err("%s alg registration failed\n", base->cra_name); 1456dc4ccfd1SDavid S. Miller list_del(&p->derived.entry); 1457dc4ccfd1SDavid S. Miller kfree(p); 1458dc4ccfd1SDavid S. Miller } else { 1459dc4ccfd1SDavid S. Miller pr_info("%s alg registered\n", base->cra_name); 1460dc4ccfd1SDavid S. Miller } 1461dc4ccfd1SDavid S. Miller return err; 1462dc4ccfd1SDavid S. Miller } 1463dc4ccfd1SDavid S. Miller 14640a625fd2SDavid S. Miller static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) 14650a625fd2SDavid S. Miller { 14660a625fd2SDavid S. Miller struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 14670a625fd2SDavid S. Miller struct hash_alg_common *halg; 14680a625fd2SDavid S. Miller struct crypto_alg *base; 14690a625fd2SDavid S. Miller struct ahash_alg *ahash; 14700a625fd2SDavid S. Miller int err; 14710a625fd2SDavid S. Miller 14720a625fd2SDavid S. Miller if (!p) 14730a625fd2SDavid S. Miller return -ENOMEM; 14740a625fd2SDavid S. Miller 14753a2c0346SDavid S. Miller p->hash_zero = tmpl->hash_zero; 14763a2c0346SDavid S. Miller p->hash_init = tmpl->hash_init; 14773a2c0346SDavid S. Miller p->auth_type = tmpl->auth_type; 1478dc4ccfd1SDavid S. 
Miller p->hmac_type = tmpl->hmac_type; 14793a2c0346SDavid S. Miller p->hw_op_hashsz = tmpl->hw_op_hashsz; 14803a2c0346SDavid S. Miller p->digest_size = tmpl->digest_size; 14813a2c0346SDavid S. Miller 14820a625fd2SDavid S. Miller ahash = &p->alg; 14830a625fd2SDavid S. Miller ahash->init = n2_hash_async_init; 14840a625fd2SDavid S. Miller ahash->update = n2_hash_async_update; 14850a625fd2SDavid S. Miller ahash->final = n2_hash_async_final; 14860a625fd2SDavid S. Miller ahash->finup = n2_hash_async_finup; 14873a2c0346SDavid S. Miller ahash->digest = n2_hash_async_digest; 14880a625fd2SDavid S. Miller 14890a625fd2SDavid S. Miller halg = &ahash->halg; 14900a625fd2SDavid S. Miller halg->digestsize = tmpl->digest_size; 14910a625fd2SDavid S. Miller 14920a625fd2SDavid S. Miller base = &halg->base; 14930a625fd2SDavid S. Miller snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 14940a625fd2SDavid S. Miller snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); 14950a625fd2SDavid S. Miller base->cra_priority = N2_CRA_PRIORITY; 14960a625fd2SDavid S. Miller base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK; 14970a625fd2SDavid S. Miller base->cra_blocksize = tmpl->block_size; 14980a625fd2SDavid S. Miller base->cra_ctxsize = sizeof(struct n2_hash_ctx); 14990a625fd2SDavid S. Miller base->cra_module = THIS_MODULE; 15000a625fd2SDavid S. Miller base->cra_init = n2_hash_cra_init; 15010a625fd2SDavid S. Miller base->cra_exit = n2_hash_cra_exit; 15020a625fd2SDavid S. Miller 15030a625fd2SDavid S. Miller list_add(&p->entry, &ahash_algs); 15040a625fd2SDavid S. Miller err = crypto_register_ahash(ahash); 15050a625fd2SDavid S. Miller if (err) { 150638511108SDavid S. Miller pr_err("%s alg registration failed\n", base->cra_name); 15070a625fd2SDavid S. Miller list_del(&p->entry); 15080a625fd2SDavid S. Miller kfree(p); 150938511108SDavid S. Miller } else { 151038511108SDavid S. 
Miller pr_info("%s alg registered\n", base->cra_name); 15110a625fd2SDavid S. Miller } 1512dc4ccfd1SDavid S. Miller if (!err && p->hmac_type != AUTH_TYPE_RESERVED) 1513dc4ccfd1SDavid S. Miller err = __n2_register_one_hmac(p); 15140a625fd2SDavid S. Miller return err; 15150a625fd2SDavid S. Miller } 15160a625fd2SDavid S. Miller 15170a625fd2SDavid S. Miller static int __devinit n2_register_algs(void) 15180a625fd2SDavid S. Miller { 15190a625fd2SDavid S. Miller int i, err = 0; 15200a625fd2SDavid S. Miller 15210a625fd2SDavid S. Miller mutex_lock(&spu_lock); 15220a625fd2SDavid S. Miller if (algs_registered++) 15230a625fd2SDavid S. Miller goto out; 15240a625fd2SDavid S. Miller 15250a625fd2SDavid S. Miller for (i = 0; i < NUM_HASH_TMPLS; i++) { 15260a625fd2SDavid S. Miller err = __n2_register_one_ahash(&hash_tmpls[i]); 15270a625fd2SDavid S. Miller if (err) { 15280a625fd2SDavid S. Miller __n2_unregister_algs(); 15290a625fd2SDavid S. Miller goto out; 15300a625fd2SDavid S. Miller } 15310a625fd2SDavid S. Miller } 15320a625fd2SDavid S. Miller for (i = 0; i < NUM_CIPHER_TMPLS; i++) { 15330a625fd2SDavid S. Miller err = __n2_register_one_cipher(&cipher_tmpls[i]); 15340a625fd2SDavid S. Miller if (err) { 15350a625fd2SDavid S. Miller __n2_unregister_algs(); 15360a625fd2SDavid S. Miller goto out; 15370a625fd2SDavid S. Miller } 15380a625fd2SDavid S. Miller } 15390a625fd2SDavid S. Miller 15400a625fd2SDavid S. Miller out: 15410a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 15420a625fd2SDavid S. Miller return err; 15430a625fd2SDavid S. Miller } 15440a625fd2SDavid S. Miller 1545dffa1844SDennis Gilmore static void __devexit n2_unregister_algs(void) 15460a625fd2SDavid S. Miller { 15470a625fd2SDavid S. Miller mutex_lock(&spu_lock); 15480a625fd2SDavid S. Miller if (!--algs_registered) 15490a625fd2SDavid S. Miller __n2_unregister_algs(); 15500a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 15510a625fd2SDavid S. Miller } 15520a625fd2SDavid S. Miller 15530a625fd2SDavid S. 
Miller /* To map CWQ queues to interrupt sources, the hypervisor API provides 15540a625fd2SDavid S. Miller * a devino. This isn't very useful to us because all of the 15552dc11581SGrant Likely * interrupts listed in the device_node have been translated to 15560a625fd2SDavid S. Miller * Linux virtual IRQ cookie numbers. 15570a625fd2SDavid S. Miller * 15580a625fd2SDavid S. Miller * So we have to back-translate, going through the 'intr' and 'ino' 15590a625fd2SDavid S. Miller * property tables of the n2cp MDESC node, matching it with the OF 15600a625fd2SDavid S. Miller * 'interrupts' property entries, in order to to figure out which 15610a625fd2SDavid S. Miller * devino goes to which already-translated IRQ. 15620a625fd2SDavid S. Miller */ 15632dc11581SGrant Likely static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip, 15640a625fd2SDavid S. Miller unsigned long dev_ino) 15650a625fd2SDavid S. Miller { 15660a625fd2SDavid S. Miller const unsigned int *dev_intrs; 15670a625fd2SDavid S. Miller unsigned int intr; 15680a625fd2SDavid S. Miller int i; 15690a625fd2SDavid S. Miller 15700a625fd2SDavid S. Miller for (i = 0; i < ip->num_intrs; i++) { 15710a625fd2SDavid S. Miller if (ip->ino_table[i].ino == dev_ino) 15720a625fd2SDavid S. Miller break; 15730a625fd2SDavid S. Miller } 15740a625fd2SDavid S. Miller if (i == ip->num_intrs) 15750a625fd2SDavid S. Miller return -ENODEV; 15760a625fd2SDavid S. Miller 15770a625fd2SDavid S. Miller intr = ip->ino_table[i].intr; 15780a625fd2SDavid S. Miller 1579ff6c7341SDavid S. Miller dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); 15800a625fd2SDavid S. Miller if (!dev_intrs) 15810a625fd2SDavid S. Miller return -ENODEV; 15820a625fd2SDavid S. Miller 158319e4875fSGrant Likely for (i = 0; i < dev->archdata.num_irqs; i++) { 15840a625fd2SDavid S. Miller if (dev_intrs[i] == intr) 15850a625fd2SDavid S. Miller return i; 15860a625fd2SDavid S. Miller } 15870a625fd2SDavid S. Miller 15880a625fd2SDavid S. 
Miller return -ENODEV; 15890a625fd2SDavid S. Miller } 15900a625fd2SDavid S. Miller 15912dc11581SGrant Likely static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip, 15920a625fd2SDavid S. Miller const char *irq_name, struct spu_queue *p, 15930a625fd2SDavid S. Miller irq_handler_t handler) 15940a625fd2SDavid S. Miller { 15950a625fd2SDavid S. Miller unsigned long herr; 15960a625fd2SDavid S. Miller int index; 15970a625fd2SDavid S. Miller 15980a625fd2SDavid S. Miller herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); 15990a625fd2SDavid S. Miller if (herr) 16000a625fd2SDavid S. Miller return -EINVAL; 16010a625fd2SDavid S. Miller 16020a625fd2SDavid S. Miller index = find_devino_index(dev, ip, p->devino); 16030a625fd2SDavid S. Miller if (index < 0) 16040a625fd2SDavid S. Miller return index; 16050a625fd2SDavid S. Miller 160619e4875fSGrant Likely p->irq = dev->archdata.irqs[index]; 16070a625fd2SDavid S. Miller 16080a625fd2SDavid S. Miller sprintf(p->irq_name, "%s-%d", irq_name, index); 16090a625fd2SDavid S. Miller 16100a625fd2SDavid S. Miller return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM, 16110a625fd2SDavid S. Miller p->irq_name, p); 16120a625fd2SDavid S. Miller } 16130a625fd2SDavid S. Miller 16140a625fd2SDavid S. Miller static struct kmem_cache *queue_cache[2]; 16150a625fd2SDavid S. Miller 16160a625fd2SDavid S. Miller static void *new_queue(unsigned long q_type) 16170a625fd2SDavid S. Miller { 16180a625fd2SDavid S. Miller return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); 16190a625fd2SDavid S. Miller } 16200a625fd2SDavid S. Miller 16210a625fd2SDavid S. Miller static void free_queue(void *p, unsigned long q_type) 16220a625fd2SDavid S. Miller { 16230a625fd2SDavid S. Miller return kmem_cache_free(queue_cache[q_type - 1], p); 16240a625fd2SDavid S. Miller } 16250a625fd2SDavid S. Miller 16260a625fd2SDavid S. Miller static int queue_cache_init(void) 16270a625fd2SDavid S. Miller { 16280a625fd2SDavid S. 
Miller if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 16290a625fd2SDavid S. Miller queue_cache[HV_NCS_QTYPE_MAU - 1] = 1630527b9525SDavid S. Miller kmem_cache_create("mau_queue", 16310a625fd2SDavid S. Miller (MAU_NUM_ENTRIES * 16320a625fd2SDavid S. Miller MAU_ENTRY_SIZE), 16330a625fd2SDavid S. Miller MAU_ENTRY_SIZE, 0, NULL); 16340a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 16350a625fd2SDavid S. Miller return -ENOMEM; 16360a625fd2SDavid S. Miller 16370a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) 16380a625fd2SDavid S. Miller queue_cache[HV_NCS_QTYPE_CWQ - 1] = 16390a625fd2SDavid S. Miller kmem_cache_create("cwq_queue", 16400a625fd2SDavid S. Miller (CWQ_NUM_ENTRIES * 16410a625fd2SDavid S. Miller CWQ_ENTRY_SIZE), 16420a625fd2SDavid S. Miller CWQ_ENTRY_SIZE, 0, NULL); 16430a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { 16440a625fd2SDavid S. Miller kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 16450a625fd2SDavid S. Miller return -ENOMEM; 16460a625fd2SDavid S. Miller } 16470a625fd2SDavid S. Miller return 0; 16480a625fd2SDavid S. Miller } 16490a625fd2SDavid S. Miller 16500a625fd2SDavid S. Miller static void queue_cache_destroy(void) 16510a625fd2SDavid S. Miller { 16520a625fd2SDavid S. Miller kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 16530a625fd2SDavid S. Miller kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); 16540a625fd2SDavid S. Miller } 16550a625fd2SDavid S. Miller 16560a625fd2SDavid S. Miller static int spu_queue_register(struct spu_queue *p, unsigned long q_type) 16570a625fd2SDavid S. Miller { 16580a625fd2SDavid S. Miller cpumask_var_t old_allowed; 16590a625fd2SDavid S. Miller unsigned long hv_ret; 16600a625fd2SDavid S. Miller 16610a625fd2SDavid S. Miller if (cpumask_empty(&p->sharing)) 16620a625fd2SDavid S. Miller return -EINVAL; 16630a625fd2SDavid S. Miller 16640a625fd2SDavid S. Miller if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) 16650a625fd2SDavid S. 
Miller return -ENOMEM; 16660a625fd2SDavid S. Miller 16670a625fd2SDavid S. Miller cpumask_copy(old_allowed, ¤t->cpus_allowed); 16680a625fd2SDavid S. Miller 16690a625fd2SDavid S. Miller set_cpus_allowed_ptr(current, &p->sharing); 16700a625fd2SDavid S. Miller 16710a625fd2SDavid S. Miller hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), 16720a625fd2SDavid S. Miller CWQ_NUM_ENTRIES, &p->qhandle); 16730a625fd2SDavid S. Miller if (!hv_ret) 16740a625fd2SDavid S. Miller sun4v_ncs_sethead_marker(p->qhandle, 0); 16750a625fd2SDavid S. Miller 16760a625fd2SDavid S. Miller set_cpus_allowed_ptr(current, old_allowed); 16770a625fd2SDavid S. Miller 16780a625fd2SDavid S. Miller free_cpumask_var(old_allowed); 16790a625fd2SDavid S. Miller 16800a625fd2SDavid S. Miller return (hv_ret ? -EINVAL : 0); 16810a625fd2SDavid S. Miller } 16820a625fd2SDavid S. Miller 16830a625fd2SDavid S. Miller static int spu_queue_setup(struct spu_queue *p) 16840a625fd2SDavid S. Miller { 16850a625fd2SDavid S. Miller int err; 16860a625fd2SDavid S. Miller 16870a625fd2SDavid S. Miller p->q = new_queue(p->q_type); 16880a625fd2SDavid S. Miller if (!p->q) 16890a625fd2SDavid S. Miller return -ENOMEM; 16900a625fd2SDavid S. Miller 16910a625fd2SDavid S. Miller err = spu_queue_register(p, p->q_type); 16920a625fd2SDavid S. Miller if (err) { 16930a625fd2SDavid S. Miller free_queue(p->q, p->q_type); 16940a625fd2SDavid S. Miller p->q = NULL; 16950a625fd2SDavid S. Miller } 16960a625fd2SDavid S. Miller 16970a625fd2SDavid S. Miller return err; 16980a625fd2SDavid S. Miller } 16990a625fd2SDavid S. Miller 17000a625fd2SDavid S. Miller static void spu_queue_destroy(struct spu_queue *p) 17010a625fd2SDavid S. Miller { 17020a625fd2SDavid S. Miller unsigned long hv_ret; 17030a625fd2SDavid S. Miller 17040a625fd2SDavid S. Miller if (!p->q) 17050a625fd2SDavid S. Miller return; 17060a625fd2SDavid S. Miller 17070a625fd2SDavid S. Miller hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); 17080a625fd2SDavid S. 
Miller 17090a625fd2SDavid S. Miller if (!hv_ret) 17100a625fd2SDavid S. Miller free_queue(p->q, p->q_type); 17110a625fd2SDavid S. Miller } 17120a625fd2SDavid S. Miller 17130a625fd2SDavid S. Miller static void spu_list_destroy(struct list_head *list) 17140a625fd2SDavid S. Miller { 17150a625fd2SDavid S. Miller struct spu_queue *p, *n; 17160a625fd2SDavid S. Miller 17170a625fd2SDavid S. Miller list_for_each_entry_safe(p, n, list, list) { 17180a625fd2SDavid S. Miller int i; 17190a625fd2SDavid S. Miller 17200a625fd2SDavid S. Miller for (i = 0; i < NR_CPUS; i++) { 17210a625fd2SDavid S. Miller if (cpu_to_cwq[i] == p) 17220a625fd2SDavid S. Miller cpu_to_cwq[i] = NULL; 17230a625fd2SDavid S. Miller } 17240a625fd2SDavid S. Miller 17250a625fd2SDavid S. Miller if (p->irq) { 17260a625fd2SDavid S. Miller free_irq(p->irq, p); 17270a625fd2SDavid S. Miller p->irq = 0; 17280a625fd2SDavid S. Miller } 17290a625fd2SDavid S. Miller spu_queue_destroy(p); 17300a625fd2SDavid S. Miller list_del(&p->list); 17310a625fd2SDavid S. Miller kfree(p); 17320a625fd2SDavid S. Miller } 17330a625fd2SDavid S. Miller } 17340a625fd2SDavid S. Miller 17350a625fd2SDavid S. Miller /* Walk the backward arcs of a CWQ 'exec-unit' node, 17360a625fd2SDavid S. Miller * gathering cpu membership information. 17370a625fd2SDavid S. Miller */ 17380a625fd2SDavid S. Miller static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, 17392dc11581SGrant Likely struct platform_device *dev, 17400a625fd2SDavid S. Miller u64 node, struct spu_queue *p, 17410a625fd2SDavid S. Miller struct spu_queue **table) 17420a625fd2SDavid S. Miller { 17430a625fd2SDavid S. Miller u64 arc; 17440a625fd2SDavid S. Miller 17450a625fd2SDavid S. Miller mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { 17460a625fd2SDavid S. Miller u64 tgt = mdesc_arc_target(mdesc, arc); 17470a625fd2SDavid S. Miller const char *name = mdesc_node_name(mdesc, tgt); 17480a625fd2SDavid S. Miller const u64 *id; 17490a625fd2SDavid S. Miller 17500a625fd2SDavid S. 
Miller if (strcmp(name, "cpu")) 17510a625fd2SDavid S. Miller continue; 17520a625fd2SDavid S. Miller id = mdesc_get_property(mdesc, tgt, "id", NULL); 17530a625fd2SDavid S. Miller if (table[*id] != NULL) { 17540a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", 1755ff6c7341SDavid S. Miller dev->dev.of_node->full_name); 17560a625fd2SDavid S. Miller return -EINVAL; 17570a625fd2SDavid S. Miller } 17580a625fd2SDavid S. Miller cpu_set(*id, p->sharing); 17590a625fd2SDavid S. Miller table[*id] = p; 17600a625fd2SDavid S. Miller } 17610a625fd2SDavid S. Miller return 0; 17620a625fd2SDavid S. Miller } 17630a625fd2SDavid S. Miller 17640a625fd2SDavid S. Miller /* Process an 'exec-unit' MDESC node of type 'cwq'. */ 17650a625fd2SDavid S. Miller static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, 17662dc11581SGrant Likely struct platform_device *dev, struct mdesc_handle *mdesc, 17670a625fd2SDavid S. Miller u64 node, const char *iname, unsigned long q_type, 17680a625fd2SDavid S. Miller irq_handler_t handler, struct spu_queue **table) 17690a625fd2SDavid S. Miller { 17700a625fd2SDavid S. Miller struct spu_queue *p; 17710a625fd2SDavid S. Miller int err; 17720a625fd2SDavid S. Miller 17730a625fd2SDavid S. Miller p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); 17740a625fd2SDavid S. Miller if (!p) { 17750a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", 1776ff6c7341SDavid S. Miller dev->dev.of_node->full_name); 17770a625fd2SDavid S. Miller return -ENOMEM; 17780a625fd2SDavid S. Miller } 17790a625fd2SDavid S. Miller 17800a625fd2SDavid S. Miller cpus_clear(p->sharing); 17810a625fd2SDavid S. Miller spin_lock_init(&p->lock); 17820a625fd2SDavid S. Miller p->q_type = q_type; 17830a625fd2SDavid S. Miller INIT_LIST_HEAD(&p->jobs); 17840a625fd2SDavid S. Miller list_add(&p->list, list); 17850a625fd2SDavid S. Miller 17860a625fd2SDavid S. 
Miller err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); 17870a625fd2SDavid S. Miller if (err) 17880a625fd2SDavid S. Miller return err; 17890a625fd2SDavid S. Miller 17900a625fd2SDavid S. Miller err = spu_queue_setup(p); 17910a625fd2SDavid S. Miller if (err) 17920a625fd2SDavid S. Miller return err; 17930a625fd2SDavid S. Miller 17940a625fd2SDavid S. Miller return spu_map_ino(dev, ip, iname, p, handler); 17950a625fd2SDavid S. Miller } 17960a625fd2SDavid S. Miller 17972dc11581SGrant Likely static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev, 17980a625fd2SDavid S. Miller struct spu_mdesc_info *ip, struct list_head *list, 17990a625fd2SDavid S. Miller const char *exec_name, unsigned long q_type, 18000a625fd2SDavid S. Miller irq_handler_t handler, struct spu_queue **table) 18010a625fd2SDavid S. Miller { 18020a625fd2SDavid S. Miller int err = 0; 18030a625fd2SDavid S. Miller u64 node; 18040a625fd2SDavid S. Miller 18050a625fd2SDavid S. Miller mdesc_for_each_node_by_name(mdesc, node, "exec-unit") { 18060a625fd2SDavid S. Miller const char *type; 18070a625fd2SDavid S. Miller 18080a625fd2SDavid S. Miller type = mdesc_get_property(mdesc, node, "type", NULL); 18090a625fd2SDavid S. Miller if (!type || strcmp(type, exec_name)) 18100a625fd2SDavid S. Miller continue; 18110a625fd2SDavid S. Miller 18120a625fd2SDavid S. Miller err = handle_exec_unit(ip, list, dev, mdesc, node, 18130a625fd2SDavid S. Miller exec_name, q_type, handler, table); 18140a625fd2SDavid S. Miller if (err) { 18150a625fd2SDavid S. Miller spu_list_destroy(list); 18160a625fd2SDavid S. Miller break; 18170a625fd2SDavid S. Miller } 18180a625fd2SDavid S. Miller } 18190a625fd2SDavid S. Miller 18200a625fd2SDavid S. Miller return err; 18210a625fd2SDavid S. Miller } 18220a625fd2SDavid S. Miller 18230a625fd2SDavid S. Miller static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, 18240a625fd2SDavid S. Miller struct spu_mdesc_info *ip) 18250a625fd2SDavid S. 
Miller { 1826eb7caf35SDavid S. Miller const u64 *ino; 1827eb7caf35SDavid S. Miller int ino_len; 18280a625fd2SDavid S. Miller int i; 18290a625fd2SDavid S. Miller 18300a625fd2SDavid S. Miller ino = mdesc_get_property(mdesc, node, "ino", &ino_len); 1831eb7caf35SDavid S. Miller if (!ino) { 1832eb7caf35SDavid S. Miller printk("NO 'ino'\n"); 18330a625fd2SDavid S. Miller return -ENODEV; 1834eb7caf35SDavid S. Miller } 18350a625fd2SDavid S. Miller 1836eb7caf35SDavid S. Miller ip->num_intrs = ino_len / sizeof(u64); 18370a625fd2SDavid S. Miller ip->ino_table = kzalloc((sizeof(struct ino_blob) * 18380a625fd2SDavid S. Miller ip->num_intrs), 18390a625fd2SDavid S. Miller GFP_KERNEL); 18400a625fd2SDavid S. Miller if (!ip->ino_table) 18410a625fd2SDavid S. Miller return -ENOMEM; 18420a625fd2SDavid S. Miller 18430a625fd2SDavid S. Miller for (i = 0; i < ip->num_intrs; i++) { 18440a625fd2SDavid S. Miller struct ino_blob *b = &ip->ino_table[i]; 1845eb7caf35SDavid S. Miller b->intr = i + 1; 18460a625fd2SDavid S. Miller b->ino = ino[i]; 18470a625fd2SDavid S. Miller } 18480a625fd2SDavid S. Miller 18490a625fd2SDavid S. Miller return 0; 18500a625fd2SDavid S. Miller } 18510a625fd2SDavid S. Miller 18520a625fd2SDavid S. Miller static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc, 18532dc11581SGrant Likely struct platform_device *dev, 18540a625fd2SDavid S. Miller struct spu_mdesc_info *ip, 18550a625fd2SDavid S. Miller const char *node_name) 18560a625fd2SDavid S. Miller { 18570a625fd2SDavid S. Miller const unsigned int *reg; 18580a625fd2SDavid S. Miller u64 node; 18590a625fd2SDavid S. Miller 1860ff6c7341SDavid S. Miller reg = of_get_property(dev->dev.of_node, "reg", NULL); 18610a625fd2SDavid S. Miller if (!reg) 18620a625fd2SDavid S. Miller return -ENODEV; 18630a625fd2SDavid S. Miller 18640a625fd2SDavid S. Miller mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { 18650a625fd2SDavid S. Miller const char *name; 18660a625fd2SDavid S. 
Miller const u64 *chdl; 18670a625fd2SDavid S. Miller 18680a625fd2SDavid S. Miller name = mdesc_get_property(mdesc, node, "name", NULL); 18690a625fd2SDavid S. Miller if (!name || strcmp(name, node_name)) 18700a625fd2SDavid S. Miller continue; 18710a625fd2SDavid S. Miller chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); 18720a625fd2SDavid S. Miller if (!chdl || (*chdl != *reg)) 18730a625fd2SDavid S. Miller continue; 18740a625fd2SDavid S. Miller ip->cfg_handle = *chdl; 18750a625fd2SDavid S. Miller return get_irq_props(mdesc, node, ip); 18760a625fd2SDavid S. Miller } 18770a625fd2SDavid S. Miller 18780a625fd2SDavid S. Miller return -ENODEV; 18790a625fd2SDavid S. Miller } 18800a625fd2SDavid S. Miller 18810a625fd2SDavid S. Miller static unsigned long n2_spu_hvapi_major; 18820a625fd2SDavid S. Miller static unsigned long n2_spu_hvapi_minor; 18830a625fd2SDavid S. Miller 18840a625fd2SDavid S. Miller static int __devinit n2_spu_hvapi_register(void) 18850a625fd2SDavid S. Miller { 18860a625fd2SDavid S. Miller int err; 18870a625fd2SDavid S. Miller 18880a625fd2SDavid S. Miller n2_spu_hvapi_major = 2; 18890a625fd2SDavid S. Miller n2_spu_hvapi_minor = 0; 18900a625fd2SDavid S. Miller 18910a625fd2SDavid S. Miller err = sun4v_hvapi_register(HV_GRP_NCS, 18920a625fd2SDavid S. Miller n2_spu_hvapi_major, 18930a625fd2SDavid S. Miller &n2_spu_hvapi_minor); 18940a625fd2SDavid S. Miller 18950a625fd2SDavid S. Miller if (!err) 18960a625fd2SDavid S. Miller pr_info("Registered NCS HVAPI version %lu.%lu\n", 18970a625fd2SDavid S. Miller n2_spu_hvapi_major, 18980a625fd2SDavid S. Miller n2_spu_hvapi_minor); 18990a625fd2SDavid S. Miller 19000a625fd2SDavid S. Miller return err; 19010a625fd2SDavid S. Miller } 19020a625fd2SDavid S. Miller 19030a625fd2SDavid S. Miller static void n2_spu_hvapi_unregister(void) 19040a625fd2SDavid S. Miller { 19050a625fd2SDavid S. Miller sun4v_hvapi_unregister(HV_GRP_NCS); 19060a625fd2SDavid S. Miller } 19070a625fd2SDavid S. Miller 19080a625fd2SDavid S. 
Miller static int global_ref; 19090a625fd2SDavid S. Miller 19100a625fd2SDavid S. Miller static int __devinit grab_global_resources(void) 19110a625fd2SDavid S. Miller { 19120a625fd2SDavid S. Miller int err = 0; 19130a625fd2SDavid S. Miller 19140a625fd2SDavid S. Miller mutex_lock(&spu_lock); 19150a625fd2SDavid S. Miller 19160a625fd2SDavid S. Miller if (global_ref++) 19170a625fd2SDavid S. Miller goto out; 19180a625fd2SDavid S. Miller 19190a625fd2SDavid S. Miller err = n2_spu_hvapi_register(); 19200a625fd2SDavid S. Miller if (err) 19210a625fd2SDavid S. Miller goto out; 19220a625fd2SDavid S. Miller 19230a625fd2SDavid S. Miller err = queue_cache_init(); 19240a625fd2SDavid S. Miller if (err) 19250a625fd2SDavid S. Miller goto out_hvapi_release; 19260a625fd2SDavid S. Miller 19270a625fd2SDavid S. Miller err = -ENOMEM; 19280a625fd2SDavid S. Miller cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, 19290a625fd2SDavid S. Miller GFP_KERNEL); 19300a625fd2SDavid S. Miller if (!cpu_to_cwq) 19310a625fd2SDavid S. Miller goto out_queue_cache_destroy; 19320a625fd2SDavid S. Miller 19330a625fd2SDavid S. Miller cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, 19340a625fd2SDavid S. Miller GFP_KERNEL); 19350a625fd2SDavid S. Miller if (!cpu_to_mau) 19360a625fd2SDavid S. Miller goto out_free_cwq_table; 19370a625fd2SDavid S. Miller 19380a625fd2SDavid S. Miller err = 0; 19390a625fd2SDavid S. Miller 19400a625fd2SDavid S. Miller out: 19410a625fd2SDavid S. Miller if (err) 19420a625fd2SDavid S. Miller global_ref--; 19430a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 19440a625fd2SDavid S. Miller return err; 19450a625fd2SDavid S. Miller 19460a625fd2SDavid S. Miller out_free_cwq_table: 19470a625fd2SDavid S. Miller kfree(cpu_to_cwq); 19480a625fd2SDavid S. Miller cpu_to_cwq = NULL; 19490a625fd2SDavid S. Miller 19500a625fd2SDavid S. Miller out_queue_cache_destroy: 19510a625fd2SDavid S. Miller queue_cache_destroy(); 19520a625fd2SDavid S. Miller 19530a625fd2SDavid S. 
Miller out_hvapi_release: 19540a625fd2SDavid S. Miller n2_spu_hvapi_unregister(); 19550a625fd2SDavid S. Miller goto out; 19560a625fd2SDavid S. Miller } 19570a625fd2SDavid S. Miller 19580a625fd2SDavid S. Miller static void release_global_resources(void) 19590a625fd2SDavid S. Miller { 19600a625fd2SDavid S. Miller mutex_lock(&spu_lock); 19610a625fd2SDavid S. Miller if (!--global_ref) { 19620a625fd2SDavid S. Miller kfree(cpu_to_cwq); 19630a625fd2SDavid S. Miller cpu_to_cwq = NULL; 19640a625fd2SDavid S. Miller 19650a625fd2SDavid S. Miller kfree(cpu_to_mau); 19660a625fd2SDavid S. Miller cpu_to_mau = NULL; 19670a625fd2SDavid S. Miller 19680a625fd2SDavid S. Miller queue_cache_destroy(); 19690a625fd2SDavid S. Miller n2_spu_hvapi_unregister(); 19700a625fd2SDavid S. Miller } 19710a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 19720a625fd2SDavid S. Miller } 19730a625fd2SDavid S. Miller 19740a625fd2SDavid S. Miller static struct n2_crypto * __devinit alloc_n2cp(void) 19750a625fd2SDavid S. Miller { 19760a625fd2SDavid S. Miller struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL); 19770a625fd2SDavid S. Miller 19780a625fd2SDavid S. Miller if (np) 19790a625fd2SDavid S. Miller INIT_LIST_HEAD(&np->cwq_list); 19800a625fd2SDavid S. Miller 19810a625fd2SDavid S. Miller return np; 19820a625fd2SDavid S. Miller } 19830a625fd2SDavid S. Miller 19840a625fd2SDavid S. Miller static void free_n2cp(struct n2_crypto *np) 19850a625fd2SDavid S. Miller { 19860a625fd2SDavid S. Miller if (np->cwq_info.ino_table) { 19870a625fd2SDavid S. Miller kfree(np->cwq_info.ino_table); 19880a625fd2SDavid S. Miller np->cwq_info.ino_table = NULL; 19890a625fd2SDavid S. Miller } 19900a625fd2SDavid S. Miller 19910a625fd2SDavid S. Miller kfree(np); 19920a625fd2SDavid S. Miller } 19930a625fd2SDavid S. Miller 19940a625fd2SDavid S. Miller static void __devinit n2_spu_driver_version(void) 19950a625fd2SDavid S. Miller { 19960a625fd2SDavid S. Miller static int n2_spu_version_printed; 19970a625fd2SDavid S. 
	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

/* Probe one N2CP (control-word queue / crypto) device: take a reference
 * on the shared global resources, scan the machine description for the
 * device's CWQs and interrupts, and register the crypto algorithms.
 * Returns 0 on success or a negative errno, unwinding via the labels
 * at the bottom on failure.
 */
static int __devinit n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	const char *full_name;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	full_name = dev->dev.of_node->full_name;
	pr_info("Found N2CP at %s\n", full_name);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
			full_name);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab "
			"global resources.\n", full_name);
		goto out_free_n2cp;
	}

	/* The MDESC handle is reference counted; it must be released on
	 * every path below once the scan is complete or has failed.
	 */
	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
			full_name);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
			full_name);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
			full_name);
		goto out_free_global;
	}

	/* Only advertise the algorithms once the queues are usable. */
	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
			full_name);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

/* Tear down one N2CP device in the reverse order of probe. */
static int __devexit n2_crypto_remove(struct platform_device *dev)
{
Miller struct n2_crypto *np = dev_get_drvdata(&dev->dev); 20810a625fd2SDavid S. Miller 20820a625fd2SDavid S. Miller n2_unregister_algs(); 20830a625fd2SDavid S. Miller 20840a625fd2SDavid S. Miller spu_list_destroy(&np->cwq_list); 20850a625fd2SDavid S. Miller 20860a625fd2SDavid S. Miller release_global_resources(); 20870a625fd2SDavid S. Miller 20880a625fd2SDavid S. Miller free_n2cp(np); 20890a625fd2SDavid S. Miller 20900a625fd2SDavid S. Miller return 0; 20910a625fd2SDavid S. Miller } 20920a625fd2SDavid S. Miller 20930a625fd2SDavid S. Miller static struct n2_mau * __devinit alloc_ncp(void) 20940a625fd2SDavid S. Miller { 20950a625fd2SDavid S. Miller struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL); 20960a625fd2SDavid S. Miller 20970a625fd2SDavid S. Miller if (mp) 20980a625fd2SDavid S. Miller INIT_LIST_HEAD(&mp->mau_list); 20990a625fd2SDavid S. Miller 21000a625fd2SDavid S. Miller return mp; 21010a625fd2SDavid S. Miller } 21020a625fd2SDavid S. Miller 21030a625fd2SDavid S. Miller static void free_ncp(struct n2_mau *mp) 21040a625fd2SDavid S. Miller { 21050a625fd2SDavid S. Miller if (mp->mau_info.ino_table) { 21060a625fd2SDavid S. Miller kfree(mp->mau_info.ino_table); 21070a625fd2SDavid S. Miller mp->mau_info.ino_table = NULL; 21080a625fd2SDavid S. Miller } 21090a625fd2SDavid S. Miller 21100a625fd2SDavid S. Miller kfree(mp); 21110a625fd2SDavid S. Miller } 21120a625fd2SDavid S. Miller 21134ebb24f7SGrant Likely static int __devinit n2_mau_probe(struct platform_device *dev) 21140a625fd2SDavid S. Miller { 21150a625fd2SDavid S. Miller struct mdesc_handle *mdesc; 21160a625fd2SDavid S. Miller const char *full_name; 21170a625fd2SDavid S. Miller struct n2_mau *mp; 21180a625fd2SDavid S. Miller int err; 21190a625fd2SDavid S. Miller 21200a625fd2SDavid S. Miller n2_spu_driver_version(); 21210a625fd2SDavid S. Miller 2122ff6c7341SDavid S. Miller full_name = dev->dev.of_node->full_name; 21230a625fd2SDavid S. 
Miller pr_info("Found NCP at %s\n", full_name); 21240a625fd2SDavid S. Miller 21250a625fd2SDavid S. Miller mp = alloc_ncp(); 21260a625fd2SDavid S. Miller if (!mp) { 21270a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to allocate ncp.\n", 21280a625fd2SDavid S. Miller full_name); 21290a625fd2SDavid S. Miller return -ENOMEM; 21300a625fd2SDavid S. Miller } 21310a625fd2SDavid S. Miller 21320a625fd2SDavid S. Miller err = grab_global_resources(); 21330a625fd2SDavid S. Miller if (err) { 21340a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to grab " 21350a625fd2SDavid S. Miller "global resources.\n", full_name); 21360a625fd2SDavid S. Miller goto out_free_ncp; 21370a625fd2SDavid S. Miller } 21380a625fd2SDavid S. Miller 21390a625fd2SDavid S. Miller mdesc = mdesc_grab(); 21400a625fd2SDavid S. Miller 21410a625fd2SDavid S. Miller if (!mdesc) { 21420a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", 21430a625fd2SDavid S. Miller full_name); 21440a625fd2SDavid S. Miller err = -ENODEV; 21450a625fd2SDavid S. Miller goto out_free_global; 21460a625fd2SDavid S. Miller } 21470a625fd2SDavid S. Miller 21480a625fd2SDavid S. Miller err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); 21490a625fd2SDavid S. Miller if (err) { 21500a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", 21510a625fd2SDavid S. Miller full_name); 21520a625fd2SDavid S. Miller mdesc_release(mdesc); 21530a625fd2SDavid S. Miller goto out_free_global; 21540a625fd2SDavid S. Miller } 21550a625fd2SDavid S. Miller 21560a625fd2SDavid S. Miller err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list, 21570a625fd2SDavid S. Miller "mau", HV_NCS_QTYPE_MAU, mau_intr, 21580a625fd2SDavid S. Miller cpu_to_mau); 21590a625fd2SDavid S. Miller mdesc_release(mdesc); 21600a625fd2SDavid S. Miller 21610a625fd2SDavid S. Miller if (err) { 21620a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n", 21630a625fd2SDavid S. 
Miller full_name); 21640a625fd2SDavid S. Miller goto out_free_global; 21650a625fd2SDavid S. Miller } 21660a625fd2SDavid S. Miller 21670a625fd2SDavid S. Miller dev_set_drvdata(&dev->dev, mp); 21680a625fd2SDavid S. Miller 21690a625fd2SDavid S. Miller return 0; 21700a625fd2SDavid S. Miller 21710a625fd2SDavid S. Miller out_free_global: 21720a625fd2SDavid S. Miller release_global_resources(); 21730a625fd2SDavid S. Miller 21740a625fd2SDavid S. Miller out_free_ncp: 21750a625fd2SDavid S. Miller free_ncp(mp); 21760a625fd2SDavid S. Miller 21770a625fd2SDavid S. Miller return err; 21780a625fd2SDavid S. Miller } 21790a625fd2SDavid S. Miller 21802dc11581SGrant Likely static int __devexit n2_mau_remove(struct platform_device *dev) 21810a625fd2SDavid S. Miller { 21820a625fd2SDavid S. Miller struct n2_mau *mp = dev_get_drvdata(&dev->dev); 21830a625fd2SDavid S. Miller 21840a625fd2SDavid S. Miller spu_list_destroy(&mp->mau_list); 21850a625fd2SDavid S. Miller 21860a625fd2SDavid S. Miller release_global_resources(); 21870a625fd2SDavid S. Miller 21880a625fd2SDavid S. Miller free_ncp(mp); 21890a625fd2SDavid S. Miller 21900a625fd2SDavid S. Miller return 0; 21910a625fd2SDavid S. Miller } 21920a625fd2SDavid S. Miller 21930a625fd2SDavid S. Miller static struct of_device_id n2_crypto_match[] = { 21940a625fd2SDavid S. Miller { 21950a625fd2SDavid S. Miller .name = "n2cp", 21960a625fd2SDavid S. Miller .compatible = "SUNW,n2-cwq", 21970a625fd2SDavid S. Miller }, 21980a625fd2SDavid S. Miller { 21990a625fd2SDavid S. Miller .name = "n2cp", 22000a625fd2SDavid S. Miller .compatible = "SUNW,vf-cwq", 22010a625fd2SDavid S. Miller }, 2202eb7caf35SDavid S. Miller { 2203eb7caf35SDavid S. Miller .name = "n2cp", 2204eb7caf35SDavid S. Miller .compatible = "SUNW,kt-cwq", 2205eb7caf35SDavid S. Miller }, 22060a625fd2SDavid S. Miller {}, 22070a625fd2SDavid S. Miller }; 22080a625fd2SDavid S. Miller 22090a625fd2SDavid S. Miller MODULE_DEVICE_TABLE(of, n2_crypto_match); 22100a625fd2SDavid S. 
Miller 22114ebb24f7SGrant Likely static struct platform_driver n2_crypto_driver = { 2212ff6c7341SDavid S. Miller .driver = { 22130a625fd2SDavid S. Miller .name = "n2cp", 2214ff6c7341SDavid S. Miller .owner = THIS_MODULE, 2215ff6c7341SDavid S. Miller .of_match_table = n2_crypto_match, 2216ff6c7341SDavid S. Miller }, 22170a625fd2SDavid S. Miller .probe = n2_crypto_probe, 22180a625fd2SDavid S. Miller .remove = __devexit_p(n2_crypto_remove), 22190a625fd2SDavid S. Miller }; 22200a625fd2SDavid S. Miller 22210a625fd2SDavid S. Miller static struct of_device_id n2_mau_match[] = { 22220a625fd2SDavid S. Miller { 22230a625fd2SDavid S. Miller .name = "ncp", 22240a625fd2SDavid S. Miller .compatible = "SUNW,n2-mau", 22250a625fd2SDavid S. Miller }, 22260a625fd2SDavid S. Miller { 22270a625fd2SDavid S. Miller .name = "ncp", 22280a625fd2SDavid S. Miller .compatible = "SUNW,vf-mau", 22290a625fd2SDavid S. Miller }, 2230eb7caf35SDavid S. Miller { 2231eb7caf35SDavid S. Miller .name = "ncp", 2232eb7caf35SDavid S. Miller .compatible = "SUNW,kt-mau", 2233eb7caf35SDavid S. Miller }, 22340a625fd2SDavid S. Miller {}, 22350a625fd2SDavid S. Miller }; 22360a625fd2SDavid S. Miller 22370a625fd2SDavid S. Miller MODULE_DEVICE_TABLE(of, n2_mau_match); 22380a625fd2SDavid S. Miller 22394ebb24f7SGrant Likely static struct platform_driver n2_mau_driver = { 2240ff6c7341SDavid S. Miller .driver = { 22410a625fd2SDavid S. Miller .name = "ncp", 2242ff6c7341SDavid S. Miller .owner = THIS_MODULE, 2243ff6c7341SDavid S. Miller .of_match_table = n2_mau_match, 2244ff6c7341SDavid S. Miller }, 22450a625fd2SDavid S. Miller .probe = n2_mau_probe, 22460a625fd2SDavid S. Miller .remove = __devexit_p(n2_mau_remove), 22470a625fd2SDavid S. Miller }; 22480a625fd2SDavid S. Miller 22490a625fd2SDavid S. Miller static int __init n2_init(void) 22500a625fd2SDavid S. Miller { 22514ebb24f7SGrant Likely int err = platform_driver_register(&n2_crypto_driver); 22520a625fd2SDavid S. Miller 22530a625fd2SDavid S. 
Miller if (!err) { 22544ebb24f7SGrant Likely err = platform_driver_register(&n2_mau_driver); 22550a625fd2SDavid S. Miller if (err) 22564ebb24f7SGrant Likely platform_driver_unregister(&n2_crypto_driver); 22570a625fd2SDavid S. Miller } 22580a625fd2SDavid S. Miller return err; 22590a625fd2SDavid S. Miller } 22600a625fd2SDavid S. Miller 22610a625fd2SDavid S. Miller static void __exit n2_exit(void) 22620a625fd2SDavid S. Miller { 22634ebb24f7SGrant Likely platform_driver_unregister(&n2_mau_driver); 22644ebb24f7SGrant Likely platform_driver_unregister(&n2_crypto_driver); 22650a625fd2SDavid S. Miller } 22660a625fd2SDavid S. Miller 22670a625fd2SDavid S. Miller module_init(n2_init); 22680a625fd2SDavid S. Miller module_exit(n2_exit); 2269