10a625fd2SDavid S. Miller /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. 20a625fd2SDavid S. Miller * 30a625fd2SDavid S. Miller * Copyright (C) 2010 David S. Miller <davem@davemloft.net> 40a625fd2SDavid S. Miller */ 50a625fd2SDavid S. Miller 60a625fd2SDavid S. Miller #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 70a625fd2SDavid S. Miller 80a625fd2SDavid S. Miller #include <linux/kernel.h> 90a625fd2SDavid S. Miller #include <linux/module.h> 100a625fd2SDavid S. Miller #include <linux/of.h> 110a625fd2SDavid S. Miller #include <linux/of_device.h> 120a625fd2SDavid S. Miller #include <linux/cpumask.h> 130a625fd2SDavid S. Miller #include <linux/slab.h> 140a625fd2SDavid S. Miller #include <linux/interrupt.h> 150a625fd2SDavid S. Miller #include <linux/crypto.h> 160a625fd2SDavid S. Miller #include <crypto/md5.h> 170a625fd2SDavid S. Miller #include <crypto/sha.h> 180a625fd2SDavid S. Miller #include <crypto/aes.h> 190a625fd2SDavid S. Miller #include <crypto/des.h> 200a625fd2SDavid S. Miller #include <linux/mutex.h> 210a625fd2SDavid S. Miller #include <linux/delay.h> 220a625fd2SDavid S. Miller #include <linux/sched.h> 230a625fd2SDavid S. Miller 240a625fd2SDavid S. Miller #include <crypto/internal/hash.h> 250a625fd2SDavid S. Miller #include <crypto/scatterwalk.h> 260a625fd2SDavid S. Miller #include <crypto/algapi.h> 270a625fd2SDavid S. Miller 280a625fd2SDavid S. Miller #include <asm/hypervisor.h> 290a625fd2SDavid S. Miller #include <asm/mdesc.h> 300a625fd2SDavid S. Miller 310a625fd2SDavid S. Miller #include "n2_core.h" 320a625fd2SDavid S. Miller 330a625fd2SDavid S. Miller #define DRV_MODULE_NAME "n2_crypto" 340a625fd2SDavid S. Miller #define DRV_MODULE_VERSION "0.1" 350a625fd2SDavid S. Miller #define DRV_MODULE_RELDATE "April 29, 2010" 360a625fd2SDavid S. Miller 370a625fd2SDavid S. Miller static char version[] __devinitdata = 380a625fd2SDavid S. Miller DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 390a625fd2SDavid S. 
Miller 400a625fd2SDavid S. Miller MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 410a625fd2SDavid S. Miller MODULE_DESCRIPTION("Niagara2 Crypto driver"); 420a625fd2SDavid S. Miller MODULE_LICENSE("GPL"); 430a625fd2SDavid S. Miller MODULE_VERSION(DRV_MODULE_VERSION); 440a625fd2SDavid S. Miller 450a625fd2SDavid S. Miller #define N2_CRA_PRIORITY 300 460a625fd2SDavid S. Miller 470a625fd2SDavid S. Miller static DEFINE_MUTEX(spu_lock); 480a625fd2SDavid S. Miller 490a625fd2SDavid S. Miller struct spu_queue { 500a625fd2SDavid S. Miller cpumask_t sharing; 510a625fd2SDavid S. Miller unsigned long qhandle; 520a625fd2SDavid S. Miller 530a625fd2SDavid S. Miller spinlock_t lock; 540a625fd2SDavid S. Miller u8 q_type; 550a625fd2SDavid S. Miller void *q; 560a625fd2SDavid S. Miller unsigned long head; 570a625fd2SDavid S. Miller unsigned long tail; 580a625fd2SDavid S. Miller struct list_head jobs; 590a625fd2SDavid S. Miller 600a625fd2SDavid S. Miller unsigned long devino; 610a625fd2SDavid S. Miller 620a625fd2SDavid S. Miller char irq_name[32]; 630a625fd2SDavid S. Miller unsigned int irq; 640a625fd2SDavid S. Miller 650a625fd2SDavid S. Miller struct list_head list; 660a625fd2SDavid S. Miller }; 670a625fd2SDavid S. Miller 680a625fd2SDavid S. Miller static struct spu_queue **cpu_to_cwq; 690a625fd2SDavid S. Miller static struct spu_queue **cpu_to_mau; 700a625fd2SDavid S. Miller 710a625fd2SDavid S. Miller static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off) 720a625fd2SDavid S. Miller { 730a625fd2SDavid S. Miller if (q->q_type == HV_NCS_QTYPE_MAU) { 740a625fd2SDavid S. Miller off += MAU_ENTRY_SIZE; 750a625fd2SDavid S. Miller if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES)) 760a625fd2SDavid S. Miller off = 0; 770a625fd2SDavid S. Miller } else { 780a625fd2SDavid S. Miller off += CWQ_ENTRY_SIZE; 790a625fd2SDavid S. Miller if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES)) 800a625fd2SDavid S. Miller off = 0; 810a625fd2SDavid S. Miller } 820a625fd2SDavid S. 
Miller return off; 830a625fd2SDavid S. Miller } 840a625fd2SDavid S. Miller 850a625fd2SDavid S. Miller struct n2_request_common { 860a625fd2SDavid S. Miller struct list_head entry; 870a625fd2SDavid S. Miller unsigned int offset; 880a625fd2SDavid S. Miller }; 890a625fd2SDavid S. Miller #define OFFSET_NOT_RUNNING (~(unsigned int)0) 900a625fd2SDavid S. Miller 910a625fd2SDavid S. Miller /* An async job request records the final tail value it used in 920a625fd2SDavid S. Miller * n2_request_common->offset, test to see if that offset is in 930a625fd2SDavid S. Miller * the range old_head, new_head, inclusive. 940a625fd2SDavid S. Miller */ 950a625fd2SDavid S. Miller static inline bool job_finished(struct spu_queue *q, unsigned int offset, 960a625fd2SDavid S. Miller unsigned long old_head, unsigned long new_head) 970a625fd2SDavid S. Miller { 980a625fd2SDavid S. Miller if (old_head <= new_head) { 990a625fd2SDavid S. Miller if (offset > old_head && offset <= new_head) 1000a625fd2SDavid S. Miller return true; 1010a625fd2SDavid S. Miller } else { 1020a625fd2SDavid S. Miller if (offset > old_head || offset <= new_head) 1030a625fd2SDavid S. Miller return true; 1040a625fd2SDavid S. Miller } 1050a625fd2SDavid S. Miller return false; 1060a625fd2SDavid S. Miller } 1070a625fd2SDavid S. Miller 1080a625fd2SDavid S. Miller /* When the HEAD marker is unequal to the actual HEAD, we get 1090a625fd2SDavid S. Miller * a virtual device INO interrupt. We should process the 1100a625fd2SDavid S. Miller * completed CWQ entries and adjust the HEAD marker to clear 1110a625fd2SDavid S. Miller * the IRQ. 1120a625fd2SDavid S. Miller */ 1130a625fd2SDavid S. Miller static irqreturn_t cwq_intr(int irq, void *dev_id) 1140a625fd2SDavid S. Miller { 1150a625fd2SDavid S. Miller unsigned long off, new_head, hv_ret; 1160a625fd2SDavid S. Miller struct spu_queue *q = dev_id; 1170a625fd2SDavid S. Miller 1180a625fd2SDavid S. Miller pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n", 1190a625fd2SDavid S. 
Miller smp_processor_id(), q->qhandle); 1200a625fd2SDavid S. Miller 1210a625fd2SDavid S. Miller spin_lock(&q->lock); 1220a625fd2SDavid S. Miller 1230a625fd2SDavid S. Miller hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head); 1240a625fd2SDavid S. Miller 1250a625fd2SDavid S. Miller pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n", 1260a625fd2SDavid S. Miller smp_processor_id(), new_head, hv_ret); 1270a625fd2SDavid S. Miller 1280a625fd2SDavid S. Miller for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { 1290a625fd2SDavid S. Miller /* XXX ... XXX */ 1300a625fd2SDavid S. Miller } 1310a625fd2SDavid S. Miller 1320a625fd2SDavid S. Miller hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head); 1330a625fd2SDavid S. Miller if (hv_ret == HV_EOK) 1340a625fd2SDavid S. Miller q->head = new_head; 1350a625fd2SDavid S. Miller 1360a625fd2SDavid S. Miller spin_unlock(&q->lock); 1370a625fd2SDavid S. Miller 1380a625fd2SDavid S. Miller return IRQ_HANDLED; 1390a625fd2SDavid S. Miller } 1400a625fd2SDavid S. Miller 1410a625fd2SDavid S. Miller static irqreturn_t mau_intr(int irq, void *dev_id) 1420a625fd2SDavid S. Miller { 1430a625fd2SDavid S. Miller struct spu_queue *q = dev_id; 1440a625fd2SDavid S. Miller unsigned long head, hv_ret; 1450a625fd2SDavid S. Miller 1460a625fd2SDavid S. Miller spin_lock(&q->lock); 1470a625fd2SDavid S. Miller 1480a625fd2SDavid S. Miller pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n", 1490a625fd2SDavid S. Miller smp_processor_id(), q->qhandle); 1500a625fd2SDavid S. Miller 1510a625fd2SDavid S. Miller hv_ret = sun4v_ncs_gethead(q->qhandle, &head); 1520a625fd2SDavid S. Miller 1530a625fd2SDavid S. Miller pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n", 1540a625fd2SDavid S. Miller smp_processor_id(), head, hv_ret); 1550a625fd2SDavid S. Miller 1560a625fd2SDavid S. Miller sun4v_ncs_sethead_marker(q->qhandle, head); 1570a625fd2SDavid S. Miller 1580a625fd2SDavid S. Miller spin_unlock(&q->lock); 1590a625fd2SDavid S. Miller 1600a625fd2SDavid S. 
Miller return IRQ_HANDLED; 1610a625fd2SDavid S. Miller } 1620a625fd2SDavid S. Miller 1630a625fd2SDavid S. Miller static void *spu_queue_next(struct spu_queue *q, void *cur) 1640a625fd2SDavid S. Miller { 1650a625fd2SDavid S. Miller return q->q + spu_next_offset(q, cur - q->q); 1660a625fd2SDavid S. Miller } 1670a625fd2SDavid S. Miller 1680a625fd2SDavid S. Miller static int spu_queue_num_free(struct spu_queue *q) 1690a625fd2SDavid S. Miller { 1700a625fd2SDavid S. Miller unsigned long head = q->head; 1710a625fd2SDavid S. Miller unsigned long tail = q->tail; 1720a625fd2SDavid S. Miller unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES); 1730a625fd2SDavid S. Miller unsigned long diff; 1740a625fd2SDavid S. Miller 1750a625fd2SDavid S. Miller if (head > tail) 1760a625fd2SDavid S. Miller diff = head - tail; 1770a625fd2SDavid S. Miller else 1780a625fd2SDavid S. Miller diff = (end - tail) + head; 1790a625fd2SDavid S. Miller 1800a625fd2SDavid S. Miller return (diff / CWQ_ENTRY_SIZE) - 1; 1810a625fd2SDavid S. Miller } 1820a625fd2SDavid S. Miller 1830a625fd2SDavid S. Miller static void *spu_queue_alloc(struct spu_queue *q, int num_entries) 1840a625fd2SDavid S. Miller { 1850a625fd2SDavid S. Miller int avail = spu_queue_num_free(q); 1860a625fd2SDavid S. Miller 1870a625fd2SDavid S. Miller if (avail >= num_entries) 1880a625fd2SDavid S. Miller return q->q + q->tail; 1890a625fd2SDavid S. Miller 1900a625fd2SDavid S. Miller return NULL; 1910a625fd2SDavid S. Miller } 1920a625fd2SDavid S. Miller 1930a625fd2SDavid S. Miller static unsigned long spu_queue_submit(struct spu_queue *q, void *last) 1940a625fd2SDavid S. Miller { 1950a625fd2SDavid S. Miller unsigned long hv_ret, new_tail; 1960a625fd2SDavid S. Miller 1970a625fd2SDavid S. Miller new_tail = spu_next_offset(q, last - q->q); 1980a625fd2SDavid S. Miller 1990a625fd2SDavid S. Miller hv_ret = sun4v_ncs_settail(q->qhandle, new_tail); 2000a625fd2SDavid S. Miller if (hv_ret == HV_EOK) 2010a625fd2SDavid S. 
Miller q->tail = new_tail; 2020a625fd2SDavid S. Miller return hv_ret; 2030a625fd2SDavid S. Miller } 2040a625fd2SDavid S. Miller 2050a625fd2SDavid S. Miller static u64 control_word_base(unsigned int len, unsigned int hmac_key_len, 2060a625fd2SDavid S. Miller int enc_type, int auth_type, 2070a625fd2SDavid S. Miller unsigned int hash_len, 2080a625fd2SDavid S. Miller bool sfas, bool sob, bool eob, bool encrypt, 2090a625fd2SDavid S. Miller int opcode) 2100a625fd2SDavid S. Miller { 2110a625fd2SDavid S. Miller u64 word = (len - 1) & CONTROL_LEN; 2120a625fd2SDavid S. Miller 2130a625fd2SDavid S. Miller word |= ((u64) opcode << CONTROL_OPCODE_SHIFT); 2140a625fd2SDavid S. Miller word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT); 2150a625fd2SDavid S. Miller word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT); 2160a625fd2SDavid S. Miller if (sfas) 2170a625fd2SDavid S. Miller word |= CONTROL_STORE_FINAL_AUTH_STATE; 2180a625fd2SDavid S. Miller if (sob) 2190a625fd2SDavid S. Miller word |= CONTROL_START_OF_BLOCK; 2200a625fd2SDavid S. Miller if (eob) 2210a625fd2SDavid S. Miller word |= CONTROL_END_OF_BLOCK; 2220a625fd2SDavid S. Miller if (encrypt) 2230a625fd2SDavid S. Miller word |= CONTROL_ENCRYPT; 2240a625fd2SDavid S. Miller if (hmac_key_len) 2250a625fd2SDavid S. Miller word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT; 2260a625fd2SDavid S. Miller if (hash_len) 2270a625fd2SDavid S. Miller word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT; 2280a625fd2SDavid S. Miller 2290a625fd2SDavid S. Miller return word; 2300a625fd2SDavid S. Miller } 2310a625fd2SDavid S. Miller 2320a625fd2SDavid S. Miller #if 0 2330a625fd2SDavid S. Miller static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) 2340a625fd2SDavid S. Miller { 2350a625fd2SDavid S. Miller if (this_len >= 64 || 2360a625fd2SDavid S. Miller qp->head != qp->tail) 2370a625fd2SDavid S. Miller return true; 2380a625fd2SDavid S. Miller return false; 2390a625fd2SDavid S. Miller } 2400a625fd2SDavid S. 
Miller #endif 2410a625fd2SDavid S. Miller 2420a625fd2SDavid S. Miller struct n2_base_ctx { 2430a625fd2SDavid S. Miller struct list_head list; 2440a625fd2SDavid S. Miller }; 2450a625fd2SDavid S. Miller 2460a625fd2SDavid S. Miller static void n2_base_ctx_init(struct n2_base_ctx *ctx) 2470a625fd2SDavid S. Miller { 2480a625fd2SDavid S. Miller INIT_LIST_HEAD(&ctx->list); 2490a625fd2SDavid S. Miller } 2500a625fd2SDavid S. Miller 2510a625fd2SDavid S. Miller struct n2_hash_ctx { 2520a625fd2SDavid S. Miller struct n2_base_ctx base; 2530a625fd2SDavid S. Miller 2540a625fd2SDavid S. Miller struct crypto_ahash *fallback; 2550a625fd2SDavid S. Miller 2560a625fd2SDavid S. Miller /* These next three members must match the layout created by 2570a625fd2SDavid S. Miller * crypto_init_shash_ops_async. This allows us to properly 2580a625fd2SDavid S. Miller * plumb requests we can't do in hardware down to the fallback 2590a625fd2SDavid S. Miller * operation, providing all of the data structures and layouts 2600a625fd2SDavid S. Miller * expected by those paths. 2610a625fd2SDavid S. Miller */ 2620a625fd2SDavid S. Miller struct ahash_request fallback_req; 2630a625fd2SDavid S. Miller struct shash_desc fallback_desc; 2640a625fd2SDavid S. Miller union { 2650a625fd2SDavid S. Miller struct md5_state md5; 2660a625fd2SDavid S. Miller struct sha1_state sha1; 2670a625fd2SDavid S. Miller struct sha256_state sha256; 2680a625fd2SDavid S. Miller } u; 2690a625fd2SDavid S. Miller 2700a625fd2SDavid S. Miller unsigned char hash_key[64]; 2710a625fd2SDavid S. Miller unsigned char keyed_zero_hash[32]; 2720a625fd2SDavid S. Miller }; 2730a625fd2SDavid S. Miller 2740a625fd2SDavid S. Miller static int n2_hash_async_init(struct ahash_request *req) 2750a625fd2SDavid S. Miller { 2760a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2770a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 2780a625fd2SDavid S. Miller 2790a625fd2SDavid S. 
Miller ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); 2800a625fd2SDavid S. Miller ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 2810a625fd2SDavid S. Miller 2820a625fd2SDavid S. Miller return crypto_ahash_init(&ctx->fallback_req); 2830a625fd2SDavid S. Miller } 2840a625fd2SDavid S. Miller 2850a625fd2SDavid S. Miller static int n2_hash_async_update(struct ahash_request *req) 2860a625fd2SDavid S. Miller { 2870a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2880a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 2890a625fd2SDavid S. Miller 2900a625fd2SDavid S. Miller ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); 2910a625fd2SDavid S. Miller ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 2920a625fd2SDavid S. Miller ctx->fallback_req.nbytes = req->nbytes; 2930a625fd2SDavid S. Miller ctx->fallback_req.src = req->src; 2940a625fd2SDavid S. Miller 2950a625fd2SDavid S. Miller return crypto_ahash_update(&ctx->fallback_req); 2960a625fd2SDavid S. Miller } 2970a625fd2SDavid S. Miller 2980a625fd2SDavid S. Miller static int n2_hash_async_final(struct ahash_request *req) 2990a625fd2SDavid S. Miller { 3000a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3010a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3020a625fd2SDavid S. Miller 3030a625fd2SDavid S. Miller ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); 3040a625fd2SDavid S. Miller ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 3050a625fd2SDavid S. Miller ctx->fallback_req.result = req->result; 3060a625fd2SDavid S. Miller 3070a625fd2SDavid S. Miller return crypto_ahash_final(&ctx->fallback_req); 3080a625fd2SDavid S. Miller } 3090a625fd2SDavid S. Miller 3100a625fd2SDavid S. Miller static int n2_hash_async_finup(struct ahash_request *req) 3110a625fd2SDavid S. Miller { 3120a625fd2SDavid S. 
Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3130a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3140a625fd2SDavid S. Miller 3150a625fd2SDavid S. Miller ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); 3160a625fd2SDavid S. Miller ctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 3170a625fd2SDavid S. Miller ctx->fallback_req.nbytes = req->nbytes; 3180a625fd2SDavid S. Miller ctx->fallback_req.src = req->src; 3190a625fd2SDavid S. Miller ctx->fallback_req.result = req->result; 3200a625fd2SDavid S. Miller 3210a625fd2SDavid S. Miller return crypto_ahash_finup(&ctx->fallback_req); 3220a625fd2SDavid S. Miller } 3230a625fd2SDavid S. Miller 3240a625fd2SDavid S. Miller static int n2_hash_cra_init(struct crypto_tfm *tfm) 3250a625fd2SDavid S. Miller { 3260a625fd2SDavid S. Miller const char *fallback_driver_name = tfm->__crt_alg->cra_name; 3270a625fd2SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 3280a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 3290a625fd2SDavid S. Miller struct crypto_ahash *fallback_tfm; 3300a625fd2SDavid S. Miller int err; 3310a625fd2SDavid S. Miller 3320a625fd2SDavid S. Miller fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, 3330a625fd2SDavid S. Miller CRYPTO_ALG_NEED_FALLBACK); 3340a625fd2SDavid S. Miller if (IS_ERR(fallback_tfm)) { 3350a625fd2SDavid S. Miller pr_warning("Fallback driver '%s' could not be loaded!\n", 3360a625fd2SDavid S. Miller fallback_driver_name); 3370a625fd2SDavid S. Miller err = PTR_ERR(fallback_tfm); 3380a625fd2SDavid S. Miller goto out; 3390a625fd2SDavid S. Miller } 3400a625fd2SDavid S. Miller 3410a625fd2SDavid S. Miller ctx->fallback = fallback_tfm; 3420a625fd2SDavid S. Miller return 0; 3430a625fd2SDavid S. Miller 3440a625fd2SDavid S. Miller out: 3450a625fd2SDavid S. Miller return err; 3460a625fd2SDavid S. Miller } 3470a625fd2SDavid S. Miller 3480a625fd2SDavid S. 
Miller static void n2_hash_cra_exit(struct crypto_tfm *tfm) 3490a625fd2SDavid S. Miller { 3500a625fd2SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 3510a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 3520a625fd2SDavid S. Miller 3530a625fd2SDavid S. Miller crypto_free_ahash(ctx->fallback); 3540a625fd2SDavid S. Miller } 3550a625fd2SDavid S. Miller 3560a625fd2SDavid S. Miller static unsigned long wait_for_tail(struct spu_queue *qp) 3570a625fd2SDavid S. Miller { 3580a625fd2SDavid S. Miller unsigned long head, hv_ret; 3590a625fd2SDavid S. Miller 3600a625fd2SDavid S. Miller do { 3610a625fd2SDavid S. Miller hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); 3620a625fd2SDavid S. Miller if (hv_ret != HV_EOK) { 3630a625fd2SDavid S. Miller pr_err("Hypervisor error on gethead\n"); 3640a625fd2SDavid S. Miller break; 3650a625fd2SDavid S. Miller } 3660a625fd2SDavid S. Miller if (head == qp->tail) { 3670a625fd2SDavid S. Miller qp->head = head; 3680a625fd2SDavid S. Miller break; 3690a625fd2SDavid S. Miller } 3700a625fd2SDavid S. Miller } while (1); 3710a625fd2SDavid S. Miller return hv_ret; 3720a625fd2SDavid S. Miller } 3730a625fd2SDavid S. Miller 3740a625fd2SDavid S. Miller static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, 3750a625fd2SDavid S. Miller struct cwq_initial_entry *ent) 3760a625fd2SDavid S. Miller { 3770a625fd2SDavid S. Miller unsigned long hv_ret = spu_queue_submit(qp, ent); 3780a625fd2SDavid S. Miller 3790a625fd2SDavid S. Miller if (hv_ret == HV_EOK) 3800a625fd2SDavid S. Miller hv_ret = wait_for_tail(qp); 3810a625fd2SDavid S. Miller 3820a625fd2SDavid S. Miller return hv_ret; 3830a625fd2SDavid S. Miller } 3840a625fd2SDavid S. Miller 3850a625fd2SDavid S. Miller static int n2_hash_async_digest(struct ahash_request *req, 3860a625fd2SDavid S. Miller unsigned int auth_type, unsigned int digest_size, 3870a625fd2SDavid S. Miller unsigned int result_size, void *hash_loc) 3880a625fd2SDavid S. 
Miller { 3890a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3900a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3910a625fd2SDavid S. Miller struct cwq_initial_entry *ent; 3920a625fd2SDavid S. Miller struct crypto_hash_walk walk; 3930a625fd2SDavid S. Miller struct spu_queue *qp; 3940a625fd2SDavid S. Miller unsigned long flags; 3950a625fd2SDavid S. Miller int err = -ENODEV; 3960a625fd2SDavid S. Miller int nbytes, cpu; 3970a625fd2SDavid S. Miller 3980a625fd2SDavid S. Miller /* The total effective length of the operation may not 3990a625fd2SDavid S. Miller * exceed 2^16. 4000a625fd2SDavid S. Miller */ 4010a625fd2SDavid S. Miller if (unlikely(req->nbytes > (1 << 16))) { 4020a625fd2SDavid S. Miller ctx->fallback_req.base.tfm = crypto_ahash_tfm(ctx->fallback); 4030a625fd2SDavid S. Miller ctx->fallback_req.base.flags = 4040a625fd2SDavid S. Miller req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 4050a625fd2SDavid S. Miller ctx->fallback_req.nbytes = req->nbytes; 4060a625fd2SDavid S. Miller ctx->fallback_req.src = req->src; 4070a625fd2SDavid S. Miller ctx->fallback_req.result = req->result; 4080a625fd2SDavid S. Miller 4090a625fd2SDavid S. Miller return crypto_ahash_digest(&ctx->fallback_req); 4100a625fd2SDavid S. Miller } 4110a625fd2SDavid S. Miller 4120a625fd2SDavid S. Miller n2_base_ctx_init(&ctx->base); 4130a625fd2SDavid S. Miller 4140a625fd2SDavid S. Miller nbytes = crypto_hash_walk_first(req, &walk); 4150a625fd2SDavid S. Miller 4160a625fd2SDavid S. Miller cpu = get_cpu(); 4170a625fd2SDavid S. Miller qp = cpu_to_cwq[cpu]; 4180a625fd2SDavid S. Miller if (!qp) 4190a625fd2SDavid S. Miller goto out; 4200a625fd2SDavid S. Miller 4210a625fd2SDavid S. Miller spin_lock_irqsave(&qp->lock, flags); 4220a625fd2SDavid S. Miller 4230a625fd2SDavid S. Miller /* XXX can do better, improve this later by doing a by-hand scatterlist 4240a625fd2SDavid S. Miller * XXX walk, etc. 4250a625fd2SDavid S. Miller */ 4260a625fd2SDavid S. 
Miller ent = qp->q + qp->tail; 4270a625fd2SDavid S. Miller 4280a625fd2SDavid S. Miller ent->control = control_word_base(nbytes, 0, 0, 4290a625fd2SDavid S. Miller auth_type, digest_size, 4300a625fd2SDavid S. Miller false, true, false, false, 4310a625fd2SDavid S. Miller OPCODE_INPLACE_BIT | 4320a625fd2SDavid S. Miller OPCODE_AUTH_MAC); 4330a625fd2SDavid S. Miller ent->src_addr = __pa(walk.data); 4340a625fd2SDavid S. Miller ent->auth_key_addr = 0UL; 4350a625fd2SDavid S. Miller ent->auth_iv_addr = __pa(hash_loc); 4360a625fd2SDavid S. Miller ent->final_auth_state_addr = 0UL; 4370a625fd2SDavid S. Miller ent->enc_key_addr = 0UL; 4380a625fd2SDavid S. Miller ent->enc_iv_addr = 0UL; 4390a625fd2SDavid S. Miller ent->dest_addr = __pa(hash_loc); 4400a625fd2SDavid S. Miller 4410a625fd2SDavid S. Miller nbytes = crypto_hash_walk_done(&walk, 0); 4420a625fd2SDavid S. Miller while (nbytes > 0) { 4430a625fd2SDavid S. Miller ent = spu_queue_next(qp, ent); 4440a625fd2SDavid S. Miller 4450a625fd2SDavid S. Miller ent->control = (nbytes - 1); 4460a625fd2SDavid S. Miller ent->src_addr = __pa(walk.data); 4470a625fd2SDavid S. Miller ent->auth_key_addr = 0UL; 4480a625fd2SDavid S. Miller ent->auth_iv_addr = 0UL; 4490a625fd2SDavid S. Miller ent->final_auth_state_addr = 0UL; 4500a625fd2SDavid S. Miller ent->enc_key_addr = 0UL; 4510a625fd2SDavid S. Miller ent->enc_iv_addr = 0UL; 4520a625fd2SDavid S. Miller ent->dest_addr = 0UL; 4530a625fd2SDavid S. Miller 4540a625fd2SDavid S. Miller nbytes = crypto_hash_walk_done(&walk, 0); 4550a625fd2SDavid S. Miller } 4560a625fd2SDavid S. Miller ent->control |= CONTROL_END_OF_BLOCK; 4570a625fd2SDavid S. Miller 4580a625fd2SDavid S. Miller if (submit_and_wait_for_tail(qp, ent) != HV_EOK) 4590a625fd2SDavid S. Miller err = -EINVAL; 4600a625fd2SDavid S. Miller else 4610a625fd2SDavid S. Miller err = 0; 4620a625fd2SDavid S. Miller 4630a625fd2SDavid S. Miller spin_unlock_irqrestore(&qp->lock, flags); 4640a625fd2SDavid S. Miller 4650a625fd2SDavid S. 
Miller if (!err) 4660a625fd2SDavid S. Miller memcpy(req->result, hash_loc, result_size); 4670a625fd2SDavid S. Miller out: 4680a625fd2SDavid S. Miller put_cpu(); 4690a625fd2SDavid S. Miller 4700a625fd2SDavid S. Miller return err; 4710a625fd2SDavid S. Miller } 4720a625fd2SDavid S. Miller 4730a625fd2SDavid S. Miller static int n2_md5_async_digest(struct ahash_request *req) 4740a625fd2SDavid S. Miller { 4750a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 4760a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 4770a625fd2SDavid S. Miller struct md5_state *m = &ctx->u.md5; 4780a625fd2SDavid S. Miller 4790a625fd2SDavid S. Miller if (unlikely(req->nbytes == 0)) { 4800a625fd2SDavid S. Miller static const char md5_zero[MD5_DIGEST_SIZE] = { 4810a625fd2SDavid S. Miller 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04, 4820a625fd2SDavid S. Miller 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e, 4830a625fd2SDavid S. Miller }; 4840a625fd2SDavid S. Miller 4850a625fd2SDavid S. Miller memcpy(req->result, md5_zero, MD5_DIGEST_SIZE); 4860a625fd2SDavid S. Miller return 0; 4870a625fd2SDavid S. Miller } 4880a625fd2SDavid S. Miller m->hash[0] = cpu_to_le32(0x67452301); 4890a625fd2SDavid S. Miller m->hash[1] = cpu_to_le32(0xefcdab89); 4900a625fd2SDavid S. Miller m->hash[2] = cpu_to_le32(0x98badcfe); 4910a625fd2SDavid S. Miller m->hash[3] = cpu_to_le32(0x10325476); 4920a625fd2SDavid S. Miller 4930a625fd2SDavid S. Miller return n2_hash_async_digest(req, AUTH_TYPE_MD5, 4940a625fd2SDavid S. Miller MD5_DIGEST_SIZE, MD5_DIGEST_SIZE, 4950a625fd2SDavid S. Miller m->hash); 4960a625fd2SDavid S. Miller } 4970a625fd2SDavid S. Miller 4980a625fd2SDavid S. Miller static int n2_sha1_async_digest(struct ahash_request *req) 4990a625fd2SDavid S. Miller { 5000a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 5010a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 5020a625fd2SDavid S. 
Miller struct sha1_state *s = &ctx->u.sha1; 5030a625fd2SDavid S. Miller 5040a625fd2SDavid S. Miller if (unlikely(req->nbytes == 0)) { 5050a625fd2SDavid S. Miller static const char sha1_zero[SHA1_DIGEST_SIZE] = { 5060a625fd2SDavid S. Miller 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 5070a625fd2SDavid S. Miller 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 5080a625fd2SDavid S. Miller 0x07, 0x09 5090a625fd2SDavid S. Miller }; 5100a625fd2SDavid S. Miller 5110a625fd2SDavid S. Miller memcpy(req->result, sha1_zero, SHA1_DIGEST_SIZE); 5120a625fd2SDavid S. Miller return 0; 5130a625fd2SDavid S. Miller } 5140a625fd2SDavid S. Miller s->state[0] = SHA1_H0; 5150a625fd2SDavid S. Miller s->state[1] = SHA1_H1; 5160a625fd2SDavid S. Miller s->state[2] = SHA1_H2; 5170a625fd2SDavid S. Miller s->state[3] = SHA1_H3; 5180a625fd2SDavid S. Miller s->state[4] = SHA1_H4; 5190a625fd2SDavid S. Miller 5200a625fd2SDavid S. Miller return n2_hash_async_digest(req, AUTH_TYPE_SHA1, 5210a625fd2SDavid S. Miller SHA1_DIGEST_SIZE, SHA1_DIGEST_SIZE, 5220a625fd2SDavid S. Miller s->state); 5230a625fd2SDavid S. Miller } 5240a625fd2SDavid S. Miller 5250a625fd2SDavid S. Miller static int n2_sha256_async_digest(struct ahash_request *req) 5260a625fd2SDavid S. Miller { 5270a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 5280a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 5290a625fd2SDavid S. Miller struct sha256_state *s = &ctx->u.sha256; 5300a625fd2SDavid S. Miller 5310a625fd2SDavid S. Miller if (req->nbytes == 0) { 5320a625fd2SDavid S. Miller static const char sha256_zero[SHA256_DIGEST_SIZE] = { 5330a625fd2SDavid S. Miller 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 5340a625fd2SDavid S. Miller 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 5350a625fd2SDavid S. Miller 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 5360a625fd2SDavid S. Miller 0x1b, 0x78, 0x52, 0xb8, 0x55 5370a625fd2SDavid S. 
Miller }; 5380a625fd2SDavid S. Miller 5390a625fd2SDavid S. Miller memcpy(req->result, sha256_zero, SHA256_DIGEST_SIZE); 5400a625fd2SDavid S. Miller return 0; 5410a625fd2SDavid S. Miller } 5420a625fd2SDavid S. Miller s->state[0] = SHA256_H0; 5430a625fd2SDavid S. Miller s->state[1] = SHA256_H1; 5440a625fd2SDavid S. Miller s->state[2] = SHA256_H2; 5450a625fd2SDavid S. Miller s->state[3] = SHA256_H3; 5460a625fd2SDavid S. Miller s->state[4] = SHA256_H4; 5470a625fd2SDavid S. Miller s->state[5] = SHA256_H5; 5480a625fd2SDavid S. Miller s->state[6] = SHA256_H6; 5490a625fd2SDavid S. Miller s->state[7] = SHA256_H7; 5500a625fd2SDavid S. Miller 5510a625fd2SDavid S. Miller return n2_hash_async_digest(req, AUTH_TYPE_SHA256, 5520a625fd2SDavid S. Miller SHA256_DIGEST_SIZE, SHA256_DIGEST_SIZE, 5530a625fd2SDavid S. Miller s->state); 5540a625fd2SDavid S. Miller } 5550a625fd2SDavid S. Miller 5560a625fd2SDavid S. Miller static int n2_sha224_async_digest(struct ahash_request *req) 5570a625fd2SDavid S. Miller { 5580a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 5590a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 5600a625fd2SDavid S. Miller struct sha256_state *s = &ctx->u.sha256; 5610a625fd2SDavid S. Miller 5620a625fd2SDavid S. Miller if (req->nbytes == 0) { 5630a625fd2SDavid S. Miller static const char sha224_zero[SHA224_DIGEST_SIZE] = { 5640a625fd2SDavid S. Miller 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47, 5650a625fd2SDavid S. Miller 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2, 5660a625fd2SDavid S. Miller 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4, 5670a625fd2SDavid S. Miller 0x2f 5680a625fd2SDavid S. Miller }; 5690a625fd2SDavid S. Miller 5700a625fd2SDavid S. Miller memcpy(req->result, sha224_zero, SHA224_DIGEST_SIZE); 5710a625fd2SDavid S. Miller return 0; 5720a625fd2SDavid S. Miller } 5730a625fd2SDavid S. Miller s->state[0] = SHA224_H0; 5740a625fd2SDavid S. 
Miller s->state[1] = SHA224_H1; 5750a625fd2SDavid S. Miller s->state[2] = SHA224_H2; 5760a625fd2SDavid S. Miller s->state[3] = SHA224_H3; 5770a625fd2SDavid S. Miller s->state[4] = SHA224_H4; 5780a625fd2SDavid S. Miller s->state[5] = SHA224_H5; 5790a625fd2SDavid S. Miller s->state[6] = SHA224_H6; 5800a625fd2SDavid S. Miller s->state[7] = SHA224_H7; 5810a625fd2SDavid S. Miller 5820a625fd2SDavid S. Miller return n2_hash_async_digest(req, AUTH_TYPE_SHA256, 5830a625fd2SDavid S. Miller SHA256_DIGEST_SIZE, SHA224_DIGEST_SIZE, 5840a625fd2SDavid S. Miller s->state); 5850a625fd2SDavid S. Miller } 5860a625fd2SDavid S. Miller 5870a625fd2SDavid S. Miller struct n2_cipher_context { 5880a625fd2SDavid S. Miller int key_len; 5890a625fd2SDavid S. Miller int enc_type; 5900a625fd2SDavid S. Miller union { 5910a625fd2SDavid S. Miller u8 aes[AES_MAX_KEY_SIZE]; 5920a625fd2SDavid S. Miller u8 des[DES_KEY_SIZE]; 5930a625fd2SDavid S. Miller u8 des3[3 * DES_KEY_SIZE]; 5940a625fd2SDavid S. Miller u8 arc4[258]; /* S-box, X, Y */ 5950a625fd2SDavid S. Miller } key; 5960a625fd2SDavid S. Miller }; 5970a625fd2SDavid S. Miller 5980a625fd2SDavid S. Miller #define N2_CHUNK_ARR_LEN 16 5990a625fd2SDavid S. Miller 6000a625fd2SDavid S. Miller struct n2_crypto_chunk { 6010a625fd2SDavid S. Miller struct list_head entry; 6020a625fd2SDavid S. Miller unsigned long iv_paddr : 44; 6030a625fd2SDavid S. Miller unsigned long arr_len : 20; 6040a625fd2SDavid S. Miller unsigned long dest_paddr; 6050a625fd2SDavid S. Miller unsigned long dest_final; 6060a625fd2SDavid S. Miller struct { 6070a625fd2SDavid S. Miller unsigned long src_paddr : 44; 6080a625fd2SDavid S. Miller unsigned long src_len : 20; 6090a625fd2SDavid S. Miller } arr[N2_CHUNK_ARR_LEN]; 6100a625fd2SDavid S. Miller }; 6110a625fd2SDavid S. Miller 6120a625fd2SDavid S. Miller struct n2_request_context { 6130a625fd2SDavid S. Miller struct ablkcipher_walk walk; 6140a625fd2SDavid S. Miller struct list_head chunk_list; 6150a625fd2SDavid S. 
Miller struct n2_crypto_chunk chunk; 6160a625fd2SDavid S. Miller u8 temp_iv[16]; 6170a625fd2SDavid S. Miller }; 6180a625fd2SDavid S. Miller 6190a625fd2SDavid S. Miller /* The SPU allows some level of flexibility for partial cipher blocks 6200a625fd2SDavid S. Miller * being specified in a descriptor. 6210a625fd2SDavid S. Miller * 6220a625fd2SDavid S. Miller * It merely requires that every descriptor's length field is at least 6230a625fd2SDavid S. Miller * as large as the cipher block size. This means that a cipher block 6240a625fd2SDavid S. Miller * can span at most 2 descriptors. However, this does not allow a 6250a625fd2SDavid S. Miller * partial block to span into the final descriptor as that would 6260a625fd2SDavid S. Miller * violate the rule (since every descriptor's length must be at lest 6270a625fd2SDavid S. Miller * the block size). So, for example, assuming an 8 byte block size: 6280a625fd2SDavid S. Miller * 6290a625fd2SDavid S. Miller * 0xe --> 0xa --> 0x8 6300a625fd2SDavid S. Miller * 6310a625fd2SDavid S. Miller * is a valid length sequence, whereas: 6320a625fd2SDavid S. Miller * 6330a625fd2SDavid S. Miller * 0xe --> 0xb --> 0x7 6340a625fd2SDavid S. Miller * 6350a625fd2SDavid S. Miller * is not a valid sequence. 6360a625fd2SDavid S. Miller */ 6370a625fd2SDavid S. Miller 6380a625fd2SDavid S. Miller struct n2_cipher_alg { 6390a625fd2SDavid S. Miller struct list_head entry; 6400a625fd2SDavid S. Miller u8 enc_type; 6410a625fd2SDavid S. Miller struct crypto_alg alg; 6420a625fd2SDavid S. Miller }; 6430a625fd2SDavid S. Miller 6440a625fd2SDavid S. Miller static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm) 6450a625fd2SDavid S. Miller { 6460a625fd2SDavid S. Miller struct crypto_alg *alg = tfm->__crt_alg; 6470a625fd2SDavid S. Miller 6480a625fd2SDavid S. Miller return container_of(alg, struct n2_cipher_alg, alg); 6490a625fd2SDavid S. Miller } 6500a625fd2SDavid S. Miller 6510a625fd2SDavid S. 
Miller struct n2_cipher_request_context { 6520a625fd2SDavid S. Miller struct ablkcipher_walk walk; 6530a625fd2SDavid S. Miller }; 6540a625fd2SDavid S. Miller 6550a625fd2SDavid S. Miller static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 6560a625fd2SDavid S. Miller unsigned int keylen) 6570a625fd2SDavid S. Miller { 6580a625fd2SDavid S. Miller struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 6590a625fd2SDavid S. Miller struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 6600a625fd2SDavid S. Miller struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 6610a625fd2SDavid S. Miller 6620a625fd2SDavid S. Miller ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK); 6630a625fd2SDavid S. Miller 6640a625fd2SDavid S. Miller switch (keylen) { 6650a625fd2SDavid S. Miller case AES_KEYSIZE_128: 6660a625fd2SDavid S. Miller ctx->enc_type |= ENC_TYPE_ALG_AES128; 6670a625fd2SDavid S. Miller break; 6680a625fd2SDavid S. Miller case AES_KEYSIZE_192: 6690a625fd2SDavid S. Miller ctx->enc_type |= ENC_TYPE_ALG_AES192; 6700a625fd2SDavid S. Miller break; 6710a625fd2SDavid S. Miller case AES_KEYSIZE_256: 6720a625fd2SDavid S. Miller ctx->enc_type |= ENC_TYPE_ALG_AES256; 6730a625fd2SDavid S. Miller break; 6740a625fd2SDavid S. Miller default: 6750a625fd2SDavid S. Miller crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 6760a625fd2SDavid S. Miller return -EINVAL; 6770a625fd2SDavid S. Miller } 6780a625fd2SDavid S. Miller 6790a625fd2SDavid S. Miller ctx->key_len = keylen; 6800a625fd2SDavid S. Miller memcpy(ctx->key.aes, key, keylen); 6810a625fd2SDavid S. Miller return 0; 6820a625fd2SDavid S. Miller } 6830a625fd2SDavid S. Miller 6840a625fd2SDavid S. Miller static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 6850a625fd2SDavid S. Miller unsigned int keylen) 6860a625fd2SDavid S. Miller { 6870a625fd2SDavid S. Miller struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 6880a625fd2SDavid S. 
Miller struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 6890a625fd2SDavid S. Miller struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 6900a625fd2SDavid S. Miller u32 tmp[DES_EXPKEY_WORDS]; 6910a625fd2SDavid S. Miller int err; 6920a625fd2SDavid S. Miller 6930a625fd2SDavid S. Miller ctx->enc_type = n2alg->enc_type; 6940a625fd2SDavid S. Miller 6950a625fd2SDavid S. Miller if (keylen != DES_KEY_SIZE) { 6960a625fd2SDavid S. Miller crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 6970a625fd2SDavid S. Miller return -EINVAL; 6980a625fd2SDavid S. Miller } 6990a625fd2SDavid S. Miller 7000a625fd2SDavid S. Miller err = des_ekey(tmp, key); 7010a625fd2SDavid S. Miller if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { 7020a625fd2SDavid S. Miller tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 7030a625fd2SDavid S. Miller return -EINVAL; 7040a625fd2SDavid S. Miller } 7050a625fd2SDavid S. Miller 7060a625fd2SDavid S. Miller ctx->key_len = keylen; 7070a625fd2SDavid S. Miller memcpy(ctx->key.des, key, keylen); 7080a625fd2SDavid S. Miller return 0; 7090a625fd2SDavid S. Miller } 7100a625fd2SDavid S. Miller 7110a625fd2SDavid S. Miller static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 7120a625fd2SDavid S. Miller unsigned int keylen) 7130a625fd2SDavid S. Miller { 7140a625fd2SDavid S. Miller struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 7150a625fd2SDavid S. Miller struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 7160a625fd2SDavid S. Miller struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 7170a625fd2SDavid S. Miller 7180a625fd2SDavid S. Miller ctx->enc_type = n2alg->enc_type; 7190a625fd2SDavid S. Miller 7200a625fd2SDavid S. Miller if (keylen != (3 * DES_KEY_SIZE)) { 7210a625fd2SDavid S. Miller crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 7220a625fd2SDavid S. Miller return -EINVAL; 7230a625fd2SDavid S. Miller } 7240a625fd2SDavid S. Miller ctx->key_len = keylen; 7250a625fd2SDavid S. 
Miller memcpy(ctx->key.des3, key, keylen); 7260a625fd2SDavid S. Miller return 0; 7270a625fd2SDavid S. Miller } 7280a625fd2SDavid S. Miller 7290a625fd2SDavid S. Miller static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 7300a625fd2SDavid S. Miller unsigned int keylen) 7310a625fd2SDavid S. Miller { 7320a625fd2SDavid S. Miller struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 7330a625fd2SDavid S. Miller struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 7340a625fd2SDavid S. Miller struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 7350a625fd2SDavid S. Miller u8 *s = ctx->key.arc4; 7360a625fd2SDavid S. Miller u8 *x = s + 256; 7370a625fd2SDavid S. Miller u8 *y = x + 1; 7380a625fd2SDavid S. Miller int i, j, k; 7390a625fd2SDavid S. Miller 7400a625fd2SDavid S. Miller ctx->enc_type = n2alg->enc_type; 7410a625fd2SDavid S. Miller 7420a625fd2SDavid S. Miller j = k = 0; 7430a625fd2SDavid S. Miller *x = 0; 7440a625fd2SDavid S. Miller *y = 0; 7450a625fd2SDavid S. Miller for (i = 0; i < 256; i++) 7460a625fd2SDavid S. Miller s[i] = i; 7470a625fd2SDavid S. Miller for (i = 0; i < 256; i++) { 7480a625fd2SDavid S. Miller u8 a = s[i]; 7490a625fd2SDavid S. Miller j = (j + key[k] + a) & 0xff; 7500a625fd2SDavid S. Miller s[i] = s[j]; 7510a625fd2SDavid S. Miller s[j] = a; 7520a625fd2SDavid S. Miller if (++k >= keylen) 7530a625fd2SDavid S. Miller k = 0; 7540a625fd2SDavid S. Miller } 7550a625fd2SDavid S. Miller 7560a625fd2SDavid S. Miller return 0; 7570a625fd2SDavid S. Miller } 7580a625fd2SDavid S. Miller 7590a625fd2SDavid S. Miller static inline int cipher_descriptor_len(int nbytes, unsigned int block_size) 7600a625fd2SDavid S. Miller { 7610a625fd2SDavid S. Miller int this_len = nbytes; 7620a625fd2SDavid S. Miller 7630a625fd2SDavid S. Miller this_len -= (nbytes & (block_size - 1)); 7640a625fd2SDavid S. Miller return this_len > (1 << 16) ? (1 << 16) : this_len; 7650a625fd2SDavid S. Miller } 7660a625fd2SDavid S. Miller 7670a625fd2SDavid S. 
/* Build and submit one CWQ descriptor chain for a chunk.  The first
 * descriptor carries the full control word (enc type, key/IV addresses,
 * destination); each continuation descriptor carries only its source
 * span with a (len - 1) control encoding.  Returns -EBUSY if the queue
 * cannot hold arr_len entries, -EINVAL on hypervisor submit failure.
 * Caller must hold qp->lock.
 */
static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	/* In-place operation is signalled via a control bit with a zero
	 * destination address, rather than dest == src.
	 */
	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	/* Mark the last descriptor so the hardware knows the chain ends. */
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

/* Walk the request's scatterlists and partition the work into chunks.
 * Physically contiguous spans accumulate into one chunk until: the
 * in-place property flips, the destination becomes discontiguous, the
 * span array fills, or the 64KB total-length cap would be exceeded.
 * The first chunk is embedded in the request context; overflow chunks
 * are GFP_ATOMIC-allocated and linked on rctx->chunk_list.
 */
static int n2_compute_chunks(struct ablkcipher_request *req)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct ablkcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, walk);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.page) +
			     walk->src.offset);
		dest_paddr = (page_to_phys(walk->dst.page) +
			      walk->dst.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = cipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			/* Close the current chunk if this span cannot
			 * legally join it.
			 */
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}

/* Tear down a request: optionally copy the final IV back into the walk
 * IV, complete the scatterlist walk, and free all overflow chunks (the
 * embedded rctx->chunk is only unlinked, never freed).
 */
static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	ablkcipher_walk_complete(&rctx->walk);
	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}

}

/* Submit all chunks of an ECB request on this CPU's CWQ, then wait for
 * the queue tail to drain.  Chunks are unlinked/freed as they are
 * accepted; any leftovers are cleaned up by n2_chunk_complete().
 */
static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	/* NOTE(review): this early return skips n2_chunk_complete(), so
	 * chunks already linked before a partial n2_compute_chunks()
	 * failure (e.g. -ENOMEM mid-walk) appear to be leaked — confirm.
	 */
	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

	put_cpu();

out:
	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, false);
}
/* Submit all chunks of a chained-mode request.  The hardware takes the
 * IV by physical address per chunk, so IV plumbing differs by direction:
 *
 *  - encrypt: walk chunks in order; each chunk's IV is the previous
 *    chunk's last output block (dest_final - blocksize), starting from
 *    the caller's IV.  The final IV is simply the last such address.
 *
 *  - decrypt: each chunk's IV is the previous chunk's last *ciphertext*
 *    block, which an in-place operation would overwrite before it is
 *    needed.  So chunks are walked in reverse, and the overall final IV
 *    (last ciphertext block of the whole request) is snapshotted into
 *    rctx->temp_iv before any data is clobbered.
 */
static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	/* NOTE(review): as in n2_do_ecb(), this early return bypasses
	 * n2_chunk_complete(); chunks linked before a partial
	 * n2_compute_chunks() failure appear to be leaked — confirm.
	 */
	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			/* Next chunk chains off our last output block. */
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		/* Reverse walk: 'tmp' is the chunk preceding 'c' in
		 * request order, i.e. the one holding our input IV.
		 */
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				/* First reverse iteration == last chunk of
				 * the request: save its last ciphertext
				 * block as the final IV before it can be
				 * overwritten in place.
				 */
				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

	put_cpu();

out:
	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}
static int n2_encrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, false);
}

/* Template describing one cipher mode to register: crypto API names,
 * the SPU enc_type selector, and the ablkcipher ops.
 */
struct n2_cipher_tmpl {
	const char *name;
	const char *drv_name;
	u8 block_size;
	u8 enc_type;
	struct ablkcipher_alg ablkcipher;
};

static const struct n2_cipher_tmpl cipher_tmpls[] = {
	/* ARC4: only ECB is supported (chaining bits ignored) */
	{ .name		= "ecb(arc4)",
	  .drv_name	= "ecb-arc4",
	  .block_size	= 1,
	  .enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= 1,
		  .max_keysize	= 256,
		  .setkey	= n2_arc4_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},

	/* DES: ECB CBC and CFB are supported */
	{ .name		= "ecb(des)",
	  .drv_name	= "ecb-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(des)",
	  .drv_name	= "cbc-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= DES_BLOCK_SIZE,
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	/* NOTE(review): the CFB entries set no .ivsize although they use
	 * the chaining paths — confirm whether this is intentional.
	 */
	{ .name		= "cfb(des)",
	  .drv_name	= "cfb-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_CFB),
	  .ablkcipher	= {
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},

	/* 3DES: ECB CBC and CFB are supported */
	{ .name		= "ecb(des3_ede)",
	  .drv_name	= "ecb-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(des3_ede)",
	  .drv_name	= "cbc-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= DES_BLOCK_SIZE,
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "cfb(des3_ede)",
	  .drv_name	= "cfb-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_CFB),
	  .ablkcipher	= {
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	/* AES: ECB CBC and CTR are supported */
	{ .name		= "ecb(aes)",
	  .drv_name	= "ecb-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(aes)",
	  .drv_name	= "cbc-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= AES_BLOCK_SIZE,
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "ctr(aes)",
	  .drv_name	= "ctr-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_COUNTER),
	  .ablkcipher	= {
		  .ivsize	= AES_BLOCK_SIZE,
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  /* CTR mode: decryption is the same keystream XOR as
		   * encryption, so .decrypt deliberately reuses the
		   * encrypt path.
		   */
		  .decrypt	= n2_encrypt_chaining,
	  },
	},

};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)

static LIST_HEAD(cipher_algs);

/* Template describing one hash algorithm to register. */
struct n2_hash_tmpl {
	const char *name;
	int (*digest)(struct ahash_request *req);
	u8 digest_size;
	u8 block_size;
};
static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .digest	= n2_md5_async_digest,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .digest	= n2_sha1_async_digest,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .digest	= n2_sha256_async_digest,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .digest	= n2_sha224_async_digest,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .block_size	= SHA224_BLOCK_SIZE },
};
Miller #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) 12340a625fd2SDavid S. Miller 12350a625fd2SDavid S. Miller struct n2_ahash_alg { 12360a625fd2SDavid S. Miller struct list_head entry; 12370a625fd2SDavid S. Miller struct ahash_alg alg; 12380a625fd2SDavid S. Miller }; 12390a625fd2SDavid S. Miller static LIST_HEAD(ahash_algs); 12400a625fd2SDavid S. Miller 12410a625fd2SDavid S. Miller static int algs_registered; 12420a625fd2SDavid S. Miller 12430a625fd2SDavid S. Miller static void __n2_unregister_algs(void) 12440a625fd2SDavid S. Miller { 12450a625fd2SDavid S. Miller struct n2_cipher_alg *cipher, *cipher_tmp; 12460a625fd2SDavid S. Miller struct n2_ahash_alg *alg, *alg_tmp; 12470a625fd2SDavid S. Miller 12480a625fd2SDavid S. Miller list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { 12490a625fd2SDavid S. Miller crypto_unregister_alg(&cipher->alg); 12500a625fd2SDavid S. Miller list_del(&cipher->entry); 12510a625fd2SDavid S. Miller kfree(cipher); 12520a625fd2SDavid S. Miller } 12530a625fd2SDavid S. Miller list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { 12540a625fd2SDavid S. Miller crypto_unregister_ahash(&alg->alg); 12550a625fd2SDavid S. Miller list_del(&alg->entry); 12560a625fd2SDavid S. Miller kfree(alg); 12570a625fd2SDavid S. Miller } 12580a625fd2SDavid S. Miller } 12590a625fd2SDavid S. Miller 12600a625fd2SDavid S. Miller static int n2_cipher_cra_init(struct crypto_tfm *tfm) 12610a625fd2SDavid S. Miller { 12620a625fd2SDavid S. Miller tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); 12630a625fd2SDavid S. Miller return 0; 12640a625fd2SDavid S. Miller } 12650a625fd2SDavid S. Miller 12660a625fd2SDavid S. Miller static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) 12670a625fd2SDavid S. Miller { 12680a625fd2SDavid S. Miller struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 12690a625fd2SDavid S. Miller struct crypto_alg *alg; 12700a625fd2SDavid S. Miller int err; 12710a625fd2SDavid S. 
Miller 12720a625fd2SDavid S. Miller if (!p) 12730a625fd2SDavid S. Miller return -ENOMEM; 12740a625fd2SDavid S. Miller 12750a625fd2SDavid S. Miller alg = &p->alg; 12760a625fd2SDavid S. Miller 12770a625fd2SDavid S. Miller snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 12780a625fd2SDavid S. Miller snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); 12790a625fd2SDavid S. Miller alg->cra_priority = N2_CRA_PRIORITY; 12800a625fd2SDavid S. Miller alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 12810a625fd2SDavid S. Miller alg->cra_blocksize = tmpl->block_size; 12820a625fd2SDavid S. Miller p->enc_type = tmpl->enc_type; 12830a625fd2SDavid S. Miller alg->cra_ctxsize = sizeof(struct n2_cipher_context); 12840a625fd2SDavid S. Miller alg->cra_type = &crypto_ablkcipher_type; 12850a625fd2SDavid S. Miller alg->cra_u.ablkcipher = tmpl->ablkcipher; 12860a625fd2SDavid S. Miller alg->cra_init = n2_cipher_cra_init; 12870a625fd2SDavid S. Miller alg->cra_module = THIS_MODULE; 12880a625fd2SDavid S. Miller 12890a625fd2SDavid S. Miller list_add(&p->entry, &cipher_algs); 12900a625fd2SDavid S. Miller err = crypto_register_alg(alg); 12910a625fd2SDavid S. Miller if (err) { 12920a625fd2SDavid S. Miller list_del(&p->entry); 12930a625fd2SDavid S. Miller kfree(p); 12940a625fd2SDavid S. Miller } 12950a625fd2SDavid S. Miller return err; 12960a625fd2SDavid S. Miller } 12970a625fd2SDavid S. Miller 12980a625fd2SDavid S. Miller static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) 12990a625fd2SDavid S. Miller { 13000a625fd2SDavid S. Miller struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 13010a625fd2SDavid S. Miller struct hash_alg_common *halg; 13020a625fd2SDavid S. Miller struct crypto_alg *base; 13030a625fd2SDavid S. Miller struct ahash_alg *ahash; 13040a625fd2SDavid S. Miller int err; 13050a625fd2SDavid S. Miller 13060a625fd2SDavid S. Miller if (!p) 13070a625fd2SDavid S. 
Miller return -ENOMEM; 13080a625fd2SDavid S. Miller 13090a625fd2SDavid S. Miller ahash = &p->alg; 13100a625fd2SDavid S. Miller ahash->init = n2_hash_async_init; 13110a625fd2SDavid S. Miller ahash->update = n2_hash_async_update; 13120a625fd2SDavid S. Miller ahash->final = n2_hash_async_final; 13130a625fd2SDavid S. Miller ahash->finup = n2_hash_async_finup; 13140a625fd2SDavid S. Miller ahash->digest = tmpl->digest; 13150a625fd2SDavid S. Miller 13160a625fd2SDavid S. Miller halg = &ahash->halg; 13170a625fd2SDavid S. Miller halg->digestsize = tmpl->digest_size; 13180a625fd2SDavid S. Miller 13190a625fd2SDavid S. Miller base = &halg->base; 13200a625fd2SDavid S. Miller snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 13210a625fd2SDavid S. Miller snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); 13220a625fd2SDavid S. Miller base->cra_priority = N2_CRA_PRIORITY; 13230a625fd2SDavid S. Miller base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK; 13240a625fd2SDavid S. Miller base->cra_blocksize = tmpl->block_size; 13250a625fd2SDavid S. Miller base->cra_ctxsize = sizeof(struct n2_hash_ctx); 13260a625fd2SDavid S. Miller base->cra_module = THIS_MODULE; 13270a625fd2SDavid S. Miller base->cra_init = n2_hash_cra_init; 13280a625fd2SDavid S. Miller base->cra_exit = n2_hash_cra_exit; 13290a625fd2SDavid S. Miller 13300a625fd2SDavid S. Miller list_add(&p->entry, &ahash_algs); 13310a625fd2SDavid S. Miller err = crypto_register_ahash(ahash); 13320a625fd2SDavid S. Miller if (err) { 13330a625fd2SDavid S. Miller list_del(&p->entry); 13340a625fd2SDavid S. Miller kfree(p); 13350a625fd2SDavid S. Miller } 13360a625fd2SDavid S. Miller return err; 13370a625fd2SDavid S. Miller } 13380a625fd2SDavid S. Miller 13390a625fd2SDavid S. Miller static int __devinit n2_register_algs(void) 13400a625fd2SDavid S. Miller { 13410a625fd2SDavid S. Miller int i, err = 0; 13420a625fd2SDavid S. Miller 13430a625fd2SDavid S. 
Miller mutex_lock(&spu_lock); 13440a625fd2SDavid S. Miller if (algs_registered++) 13450a625fd2SDavid S. Miller goto out; 13460a625fd2SDavid S. Miller 13470a625fd2SDavid S. Miller for (i = 0; i < NUM_HASH_TMPLS; i++) { 13480a625fd2SDavid S. Miller err = __n2_register_one_ahash(&hash_tmpls[i]); 13490a625fd2SDavid S. Miller if (err) { 13500a625fd2SDavid S. Miller __n2_unregister_algs(); 13510a625fd2SDavid S. Miller goto out; 13520a625fd2SDavid S. Miller } 13530a625fd2SDavid S. Miller } 13540a625fd2SDavid S. Miller for (i = 0; i < NUM_CIPHER_TMPLS; i++) { 13550a625fd2SDavid S. Miller err = __n2_register_one_cipher(&cipher_tmpls[i]); 13560a625fd2SDavid S. Miller if (err) { 13570a625fd2SDavid S. Miller __n2_unregister_algs(); 13580a625fd2SDavid S. Miller goto out; 13590a625fd2SDavid S. Miller } 13600a625fd2SDavid S. Miller } 13610a625fd2SDavid S. Miller 13620a625fd2SDavid S. Miller out: 13630a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 13640a625fd2SDavid S. Miller return err; 13650a625fd2SDavid S. Miller } 13660a625fd2SDavid S. Miller 13670a625fd2SDavid S. Miller static void __exit n2_unregister_algs(void) 13680a625fd2SDavid S. Miller { 13690a625fd2SDavid S. Miller mutex_lock(&spu_lock); 13700a625fd2SDavid S. Miller if (!--algs_registered) 13710a625fd2SDavid S. Miller __n2_unregister_algs(); 13720a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 13730a625fd2SDavid S. Miller } 13740a625fd2SDavid S. Miller 13750a625fd2SDavid S. Miller /* To map CWQ queues to interrupt sources, the hypervisor API provides 13760a625fd2SDavid S. Miller * a devino. This isn't very useful to us because all of the 13770a625fd2SDavid S. Miller * interrupts listed in the of_device node have been translated to 13780a625fd2SDavid S. Miller * Linux virtual IRQ cookie numbers. 13790a625fd2SDavid S. Miller * 13800a625fd2SDavid S. Miller * So we have to back-translate, going through the 'intr' and 'ino' 13810a625fd2SDavid S. 
Miller * property tables of the n2cp MDESC node, matching it with the OF 13820a625fd2SDavid S. Miller * 'interrupts' property entries, in order to to figure out which 13830a625fd2SDavid S. Miller * devino goes to which already-translated IRQ. 13840a625fd2SDavid S. Miller */ 13850a625fd2SDavid S. Miller static int find_devino_index(struct of_device *dev, struct spu_mdesc_info *ip, 13860a625fd2SDavid S. Miller unsigned long dev_ino) 13870a625fd2SDavid S. Miller { 13880a625fd2SDavid S. Miller const unsigned int *dev_intrs; 13890a625fd2SDavid S. Miller unsigned int intr; 13900a625fd2SDavid S. Miller int i; 13910a625fd2SDavid S. Miller 13920a625fd2SDavid S. Miller for (i = 0; i < ip->num_intrs; i++) { 13930a625fd2SDavid S. Miller if (ip->ino_table[i].ino == dev_ino) 13940a625fd2SDavid S. Miller break; 13950a625fd2SDavid S. Miller } 13960a625fd2SDavid S. Miller if (i == ip->num_intrs) 13970a625fd2SDavid S. Miller return -ENODEV; 13980a625fd2SDavid S. Miller 13990a625fd2SDavid S. Miller intr = ip->ino_table[i].intr; 14000a625fd2SDavid S. Miller 1401ff6c7341SDavid S. Miller dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); 14020a625fd2SDavid S. Miller if (!dev_intrs) 14030a625fd2SDavid S. Miller return -ENODEV; 14040a625fd2SDavid S. Miller 14050a625fd2SDavid S. Miller for (i = 0; i < dev->num_irqs; i++) { 14060a625fd2SDavid S. Miller if (dev_intrs[i] == intr) 14070a625fd2SDavid S. Miller return i; 14080a625fd2SDavid S. Miller } 14090a625fd2SDavid S. Miller 14100a625fd2SDavid S. Miller return -ENODEV; 14110a625fd2SDavid S. Miller } 14120a625fd2SDavid S. Miller 14130a625fd2SDavid S. Miller static int spu_map_ino(struct of_device *dev, struct spu_mdesc_info *ip, 14140a625fd2SDavid S. Miller const char *irq_name, struct spu_queue *p, 14150a625fd2SDavid S. Miller irq_handler_t handler) 14160a625fd2SDavid S. Miller { 14170a625fd2SDavid S. Miller unsigned long herr; 14180a625fd2SDavid S. Miller int index; 14190a625fd2SDavid S. Miller 14200a625fd2SDavid S. 
Miller herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); 14210a625fd2SDavid S. Miller if (herr) 14220a625fd2SDavid S. Miller return -EINVAL; 14230a625fd2SDavid S. Miller 14240a625fd2SDavid S. Miller index = find_devino_index(dev, ip, p->devino); 14250a625fd2SDavid S. Miller if (index < 0) 14260a625fd2SDavid S. Miller return index; 14270a625fd2SDavid S. Miller 14280a625fd2SDavid S. Miller p->irq = dev->irqs[index]; 14290a625fd2SDavid S. Miller 14300a625fd2SDavid S. Miller sprintf(p->irq_name, "%s-%d", irq_name, index); 14310a625fd2SDavid S. Miller 14320a625fd2SDavid S. Miller return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM, 14330a625fd2SDavid S. Miller p->irq_name, p); 14340a625fd2SDavid S. Miller } 14350a625fd2SDavid S. Miller 14360a625fd2SDavid S. Miller static struct kmem_cache *queue_cache[2]; 14370a625fd2SDavid S. Miller 14380a625fd2SDavid S. Miller static void *new_queue(unsigned long q_type) 14390a625fd2SDavid S. Miller { 14400a625fd2SDavid S. Miller return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); 14410a625fd2SDavid S. Miller } 14420a625fd2SDavid S. Miller 14430a625fd2SDavid S. Miller static void free_queue(void *p, unsigned long q_type) 14440a625fd2SDavid S. Miller { 14450a625fd2SDavid S. Miller return kmem_cache_free(queue_cache[q_type - 1], p); 14460a625fd2SDavid S. Miller } 14470a625fd2SDavid S. Miller 14480a625fd2SDavid S. Miller static int queue_cache_init(void) 14490a625fd2SDavid S. Miller { 14500a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 14510a625fd2SDavid S. Miller queue_cache[HV_NCS_QTYPE_MAU - 1] = 1452527b9525SDavid S. Miller kmem_cache_create("mau_queue", 14530a625fd2SDavid S. Miller (MAU_NUM_ENTRIES * 14540a625fd2SDavid S. Miller MAU_ENTRY_SIZE), 14550a625fd2SDavid S. Miller MAU_ENTRY_SIZE, 0, NULL); 14560a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 14570a625fd2SDavid S. Miller return -ENOMEM; 14580a625fd2SDavid S. Miller 14590a625fd2SDavid S. 
Miller if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) 14600a625fd2SDavid S. Miller queue_cache[HV_NCS_QTYPE_CWQ - 1] = 14610a625fd2SDavid S. Miller kmem_cache_create("cwq_queue", 14620a625fd2SDavid S. Miller (CWQ_NUM_ENTRIES * 14630a625fd2SDavid S. Miller CWQ_ENTRY_SIZE), 14640a625fd2SDavid S. Miller CWQ_ENTRY_SIZE, 0, NULL); 14650a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { 14660a625fd2SDavid S. Miller kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 14670a625fd2SDavid S. Miller return -ENOMEM; 14680a625fd2SDavid S. Miller } 14690a625fd2SDavid S. Miller return 0; 14700a625fd2SDavid S. Miller } 14710a625fd2SDavid S. Miller 14720a625fd2SDavid S. Miller static void queue_cache_destroy(void) 14730a625fd2SDavid S. Miller { 14740a625fd2SDavid S. Miller kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 14750a625fd2SDavid S. Miller kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); 14760a625fd2SDavid S. Miller } 14770a625fd2SDavid S. Miller 14780a625fd2SDavid S. Miller static int spu_queue_register(struct spu_queue *p, unsigned long q_type) 14790a625fd2SDavid S. Miller { 14800a625fd2SDavid S. Miller cpumask_var_t old_allowed; 14810a625fd2SDavid S. Miller unsigned long hv_ret; 14820a625fd2SDavid S. Miller 14830a625fd2SDavid S. Miller if (cpumask_empty(&p->sharing)) 14840a625fd2SDavid S. Miller return -EINVAL; 14850a625fd2SDavid S. Miller 14860a625fd2SDavid S. Miller if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL)) 14870a625fd2SDavid S. Miller return -ENOMEM; 14880a625fd2SDavid S. Miller 14890a625fd2SDavid S. Miller cpumask_copy(old_allowed, ¤t->cpus_allowed); 14900a625fd2SDavid S. Miller 14910a625fd2SDavid S. Miller set_cpus_allowed_ptr(current, &p->sharing); 14920a625fd2SDavid S. Miller 14930a625fd2SDavid S. Miller hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), 14940a625fd2SDavid S. Miller CWQ_NUM_ENTRIES, &p->qhandle); 14950a625fd2SDavid S. Miller if (!hv_ret) 14960a625fd2SDavid S. 
Miller sun4v_ncs_sethead_marker(p->qhandle, 0); 14970a625fd2SDavid S. Miller 14980a625fd2SDavid S. Miller set_cpus_allowed_ptr(current, old_allowed); 14990a625fd2SDavid S. Miller 15000a625fd2SDavid S. Miller free_cpumask_var(old_allowed); 15010a625fd2SDavid S. Miller 15020a625fd2SDavid S. Miller return (hv_ret ? -EINVAL : 0); 15030a625fd2SDavid S. Miller } 15040a625fd2SDavid S. Miller 15050a625fd2SDavid S. Miller static int spu_queue_setup(struct spu_queue *p) 15060a625fd2SDavid S. Miller { 15070a625fd2SDavid S. Miller int err; 15080a625fd2SDavid S. Miller 15090a625fd2SDavid S. Miller p->q = new_queue(p->q_type); 15100a625fd2SDavid S. Miller if (!p->q) 15110a625fd2SDavid S. Miller return -ENOMEM; 15120a625fd2SDavid S. Miller 15130a625fd2SDavid S. Miller err = spu_queue_register(p, p->q_type); 15140a625fd2SDavid S. Miller if (err) { 15150a625fd2SDavid S. Miller free_queue(p->q, p->q_type); 15160a625fd2SDavid S. Miller p->q = NULL; 15170a625fd2SDavid S. Miller } 15180a625fd2SDavid S. Miller 15190a625fd2SDavid S. Miller return err; 15200a625fd2SDavid S. Miller } 15210a625fd2SDavid S. Miller 15220a625fd2SDavid S. Miller static void spu_queue_destroy(struct spu_queue *p) 15230a625fd2SDavid S. Miller { 15240a625fd2SDavid S. Miller unsigned long hv_ret; 15250a625fd2SDavid S. Miller 15260a625fd2SDavid S. Miller if (!p->q) 15270a625fd2SDavid S. Miller return; 15280a625fd2SDavid S. Miller 15290a625fd2SDavid S. Miller hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); 15300a625fd2SDavid S. Miller 15310a625fd2SDavid S. Miller if (!hv_ret) 15320a625fd2SDavid S. Miller free_queue(p->q, p->q_type); 15330a625fd2SDavid S. Miller } 15340a625fd2SDavid S. Miller 15350a625fd2SDavid S. Miller static void spu_list_destroy(struct list_head *list) 15360a625fd2SDavid S. Miller { 15370a625fd2SDavid S. Miller struct spu_queue *p, *n; 15380a625fd2SDavid S. Miller 15390a625fd2SDavid S. Miller list_for_each_entry_safe(p, n, list, list) { 15400a625fd2SDavid S. 
Miller int i; 15410a625fd2SDavid S. Miller 15420a625fd2SDavid S. Miller for (i = 0; i < NR_CPUS; i++) { 15430a625fd2SDavid S. Miller if (cpu_to_cwq[i] == p) 15440a625fd2SDavid S. Miller cpu_to_cwq[i] = NULL; 15450a625fd2SDavid S. Miller } 15460a625fd2SDavid S. Miller 15470a625fd2SDavid S. Miller if (p->irq) { 15480a625fd2SDavid S. Miller free_irq(p->irq, p); 15490a625fd2SDavid S. Miller p->irq = 0; 15500a625fd2SDavid S. Miller } 15510a625fd2SDavid S. Miller spu_queue_destroy(p); 15520a625fd2SDavid S. Miller list_del(&p->list); 15530a625fd2SDavid S. Miller kfree(p); 15540a625fd2SDavid S. Miller } 15550a625fd2SDavid S. Miller } 15560a625fd2SDavid S. Miller 15570a625fd2SDavid S. Miller /* Walk the backward arcs of a CWQ 'exec-unit' node, 15580a625fd2SDavid S. Miller * gathering cpu membership information. 15590a625fd2SDavid S. Miller */ 15600a625fd2SDavid S. Miller static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, 15610a625fd2SDavid S. Miller struct of_device *dev, 15620a625fd2SDavid S. Miller u64 node, struct spu_queue *p, 15630a625fd2SDavid S. Miller struct spu_queue **table) 15640a625fd2SDavid S. Miller { 15650a625fd2SDavid S. Miller u64 arc; 15660a625fd2SDavid S. Miller 15670a625fd2SDavid S. Miller mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { 15680a625fd2SDavid S. Miller u64 tgt = mdesc_arc_target(mdesc, arc); 15690a625fd2SDavid S. Miller const char *name = mdesc_node_name(mdesc, tgt); 15700a625fd2SDavid S. Miller const u64 *id; 15710a625fd2SDavid S. Miller 15720a625fd2SDavid S. Miller if (strcmp(name, "cpu")) 15730a625fd2SDavid S. Miller continue; 15740a625fd2SDavid S. Miller id = mdesc_get_property(mdesc, tgt, "id", NULL); 15750a625fd2SDavid S. Miller if (table[*id] != NULL) { 15760a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: SPU cpu slot already set.\n", 1577ff6c7341SDavid S. Miller dev->dev.of_node->full_name); 15780a625fd2SDavid S. Miller return -EINVAL; 15790a625fd2SDavid S. Miller } 15800a625fd2SDavid S. 
Miller cpu_set(*id, p->sharing); 15810a625fd2SDavid S. Miller table[*id] = p; 15820a625fd2SDavid S. Miller } 15830a625fd2SDavid S. Miller return 0; 15840a625fd2SDavid S. Miller } 15850a625fd2SDavid S. Miller 15860a625fd2SDavid S. Miller /* Process an 'exec-unit' MDESC node of type 'cwq'. */ 15870a625fd2SDavid S. Miller static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, 15880a625fd2SDavid S. Miller struct of_device *dev, struct mdesc_handle *mdesc, 15890a625fd2SDavid S. Miller u64 node, const char *iname, unsigned long q_type, 15900a625fd2SDavid S. Miller irq_handler_t handler, struct spu_queue **table) 15910a625fd2SDavid S. Miller { 15920a625fd2SDavid S. Miller struct spu_queue *p; 15930a625fd2SDavid S. Miller int err; 15940a625fd2SDavid S. Miller 15950a625fd2SDavid S. Miller p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); 15960a625fd2SDavid S. Miller if (!p) { 15970a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n", 1598ff6c7341SDavid S. Miller dev->dev.of_node->full_name); 15990a625fd2SDavid S. Miller return -ENOMEM; 16000a625fd2SDavid S. Miller } 16010a625fd2SDavid S. Miller 16020a625fd2SDavid S. Miller cpus_clear(p->sharing); 16030a625fd2SDavid S. Miller spin_lock_init(&p->lock); 16040a625fd2SDavid S. Miller p->q_type = q_type; 16050a625fd2SDavid S. Miller INIT_LIST_HEAD(&p->jobs); 16060a625fd2SDavid S. Miller list_add(&p->list, list); 16070a625fd2SDavid S. Miller 16080a625fd2SDavid S. Miller err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); 16090a625fd2SDavid S. Miller if (err) 16100a625fd2SDavid S. Miller return err; 16110a625fd2SDavid S. Miller 16120a625fd2SDavid S. Miller err = spu_queue_setup(p); 16130a625fd2SDavid S. Miller if (err) 16140a625fd2SDavid S. Miller return err; 16150a625fd2SDavid S. Miller 16160a625fd2SDavid S. Miller return spu_map_ino(dev, ip, iname, p, handler); 16170a625fd2SDavid S. Miller } 16180a625fd2SDavid S. Miller 16190a625fd2SDavid S. 
Miller static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct of_device *dev, 16200a625fd2SDavid S. Miller struct spu_mdesc_info *ip, struct list_head *list, 16210a625fd2SDavid S. Miller const char *exec_name, unsigned long q_type, 16220a625fd2SDavid S. Miller irq_handler_t handler, struct spu_queue **table) 16230a625fd2SDavid S. Miller { 16240a625fd2SDavid S. Miller int err = 0; 16250a625fd2SDavid S. Miller u64 node; 16260a625fd2SDavid S. Miller 16270a625fd2SDavid S. Miller mdesc_for_each_node_by_name(mdesc, node, "exec-unit") { 16280a625fd2SDavid S. Miller const char *type; 16290a625fd2SDavid S. Miller 16300a625fd2SDavid S. Miller type = mdesc_get_property(mdesc, node, "type", NULL); 16310a625fd2SDavid S. Miller if (!type || strcmp(type, exec_name)) 16320a625fd2SDavid S. Miller continue; 16330a625fd2SDavid S. Miller 16340a625fd2SDavid S. Miller err = handle_exec_unit(ip, list, dev, mdesc, node, 16350a625fd2SDavid S. Miller exec_name, q_type, handler, table); 16360a625fd2SDavid S. Miller if (err) { 16370a625fd2SDavid S. Miller spu_list_destroy(list); 16380a625fd2SDavid S. Miller break; 16390a625fd2SDavid S. Miller } 16400a625fd2SDavid S. Miller } 16410a625fd2SDavid S. Miller 16420a625fd2SDavid S. Miller return err; 16430a625fd2SDavid S. Miller } 16440a625fd2SDavid S. Miller 16450a625fd2SDavid S. Miller static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, 16460a625fd2SDavid S. Miller struct spu_mdesc_info *ip) 16470a625fd2SDavid S. Miller { 16480a625fd2SDavid S. Miller const u64 *intr, *ino; 16490a625fd2SDavid S. Miller int intr_len, ino_len; 16500a625fd2SDavid S. Miller int i; 16510a625fd2SDavid S. Miller 16520a625fd2SDavid S. Miller intr = mdesc_get_property(mdesc, node, "intr", &intr_len); 16530a625fd2SDavid S. Miller if (!intr) 16540a625fd2SDavid S. Miller return -ENODEV; 16550a625fd2SDavid S. Miller 16560a625fd2SDavid S. Miller ino = mdesc_get_property(mdesc, node, "ino", &ino_len); 16570a625fd2SDavid S. 
Miller if (!intr) 16580a625fd2SDavid S. Miller return -ENODEV; 16590a625fd2SDavid S. Miller 16600a625fd2SDavid S. Miller if (intr_len != ino_len) 16610a625fd2SDavid S. Miller return -EINVAL; 16620a625fd2SDavid S. Miller 16630a625fd2SDavid S. Miller ip->num_intrs = intr_len / sizeof(u64); 16640a625fd2SDavid S. Miller ip->ino_table = kzalloc((sizeof(struct ino_blob) * 16650a625fd2SDavid S. Miller ip->num_intrs), 16660a625fd2SDavid S. Miller GFP_KERNEL); 16670a625fd2SDavid S. Miller if (!ip->ino_table) 16680a625fd2SDavid S. Miller return -ENOMEM; 16690a625fd2SDavid S. Miller 16700a625fd2SDavid S. Miller for (i = 0; i < ip->num_intrs; i++) { 16710a625fd2SDavid S. Miller struct ino_blob *b = &ip->ino_table[i]; 16720a625fd2SDavid S. Miller b->intr = intr[i]; 16730a625fd2SDavid S. Miller b->ino = ino[i]; 16740a625fd2SDavid S. Miller } 16750a625fd2SDavid S. Miller 16760a625fd2SDavid S. Miller return 0; 16770a625fd2SDavid S. Miller } 16780a625fd2SDavid S. Miller 16790a625fd2SDavid S. Miller static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc, 16800a625fd2SDavid S. Miller struct of_device *dev, 16810a625fd2SDavid S. Miller struct spu_mdesc_info *ip, 16820a625fd2SDavid S. Miller const char *node_name) 16830a625fd2SDavid S. Miller { 16840a625fd2SDavid S. Miller const unsigned int *reg; 16850a625fd2SDavid S. Miller u64 node; 16860a625fd2SDavid S. Miller 1687ff6c7341SDavid S. Miller reg = of_get_property(dev->dev.of_node, "reg", NULL); 16880a625fd2SDavid S. Miller if (!reg) 16890a625fd2SDavid S. Miller return -ENODEV; 16900a625fd2SDavid S. Miller 16910a625fd2SDavid S. Miller mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { 16920a625fd2SDavid S. Miller const char *name; 16930a625fd2SDavid S. Miller const u64 *chdl; 16940a625fd2SDavid S. Miller 16950a625fd2SDavid S. Miller name = mdesc_get_property(mdesc, node, "name", NULL); 16960a625fd2SDavid S. Miller if (!name || strcmp(name, node_name)) 16970a625fd2SDavid S. Miller continue; 16980a625fd2SDavid S. 
Miller chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); 16990a625fd2SDavid S. Miller if (!chdl || (*chdl != *reg)) 17000a625fd2SDavid S. Miller continue; 17010a625fd2SDavid S. Miller ip->cfg_handle = *chdl; 17020a625fd2SDavid S. Miller return get_irq_props(mdesc, node, ip); 17030a625fd2SDavid S. Miller } 17040a625fd2SDavid S. Miller 17050a625fd2SDavid S. Miller return -ENODEV; 17060a625fd2SDavid S. Miller } 17070a625fd2SDavid S. Miller 17080a625fd2SDavid S. Miller static unsigned long n2_spu_hvapi_major; 17090a625fd2SDavid S. Miller static unsigned long n2_spu_hvapi_minor; 17100a625fd2SDavid S. Miller 17110a625fd2SDavid S. Miller static int __devinit n2_spu_hvapi_register(void) 17120a625fd2SDavid S. Miller { 17130a625fd2SDavid S. Miller int err; 17140a625fd2SDavid S. Miller 17150a625fd2SDavid S. Miller n2_spu_hvapi_major = 2; 17160a625fd2SDavid S. Miller n2_spu_hvapi_minor = 0; 17170a625fd2SDavid S. Miller 17180a625fd2SDavid S. Miller err = sun4v_hvapi_register(HV_GRP_NCS, 17190a625fd2SDavid S. Miller n2_spu_hvapi_major, 17200a625fd2SDavid S. Miller &n2_spu_hvapi_minor); 17210a625fd2SDavid S. Miller 17220a625fd2SDavid S. Miller if (!err) 17230a625fd2SDavid S. Miller pr_info("Registered NCS HVAPI version %lu.%lu\n", 17240a625fd2SDavid S. Miller n2_spu_hvapi_major, 17250a625fd2SDavid S. Miller n2_spu_hvapi_minor); 17260a625fd2SDavid S. Miller 17270a625fd2SDavid S. Miller return err; 17280a625fd2SDavid S. Miller } 17290a625fd2SDavid S. Miller 17300a625fd2SDavid S. Miller static void n2_spu_hvapi_unregister(void) 17310a625fd2SDavid S. Miller { 17320a625fd2SDavid S. Miller sun4v_hvapi_unregister(HV_GRP_NCS); 17330a625fd2SDavid S. Miller } 17340a625fd2SDavid S. Miller 17350a625fd2SDavid S. Miller static int global_ref; 17360a625fd2SDavid S. Miller 17370a625fd2SDavid S. Miller static int __devinit grab_global_resources(void) 17380a625fd2SDavid S. Miller { 17390a625fd2SDavid S. Miller int err = 0; 17400a625fd2SDavid S. Miller 17410a625fd2SDavid S. 
Miller mutex_lock(&spu_lock); 17420a625fd2SDavid S. Miller 17430a625fd2SDavid S. Miller if (global_ref++) 17440a625fd2SDavid S. Miller goto out; 17450a625fd2SDavid S. Miller 17460a625fd2SDavid S. Miller err = n2_spu_hvapi_register(); 17470a625fd2SDavid S. Miller if (err) 17480a625fd2SDavid S. Miller goto out; 17490a625fd2SDavid S. Miller 17500a625fd2SDavid S. Miller err = queue_cache_init(); 17510a625fd2SDavid S. Miller if (err) 17520a625fd2SDavid S. Miller goto out_hvapi_release; 17530a625fd2SDavid S. Miller 17540a625fd2SDavid S. Miller err = -ENOMEM; 17550a625fd2SDavid S. Miller cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, 17560a625fd2SDavid S. Miller GFP_KERNEL); 17570a625fd2SDavid S. Miller if (!cpu_to_cwq) 17580a625fd2SDavid S. Miller goto out_queue_cache_destroy; 17590a625fd2SDavid S. Miller 17600a625fd2SDavid S. Miller cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, 17610a625fd2SDavid S. Miller GFP_KERNEL); 17620a625fd2SDavid S. Miller if (!cpu_to_mau) 17630a625fd2SDavid S. Miller goto out_free_cwq_table; 17640a625fd2SDavid S. Miller 17650a625fd2SDavid S. Miller err = 0; 17660a625fd2SDavid S. Miller 17670a625fd2SDavid S. Miller out: 17680a625fd2SDavid S. Miller if (err) 17690a625fd2SDavid S. Miller global_ref--; 17700a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 17710a625fd2SDavid S. Miller return err; 17720a625fd2SDavid S. Miller 17730a625fd2SDavid S. Miller out_free_cwq_table: 17740a625fd2SDavid S. Miller kfree(cpu_to_cwq); 17750a625fd2SDavid S. Miller cpu_to_cwq = NULL; 17760a625fd2SDavid S. Miller 17770a625fd2SDavid S. Miller out_queue_cache_destroy: 17780a625fd2SDavid S. Miller queue_cache_destroy(); 17790a625fd2SDavid S. Miller 17800a625fd2SDavid S. Miller out_hvapi_release: 17810a625fd2SDavid S. Miller n2_spu_hvapi_unregister(); 17820a625fd2SDavid S. Miller goto out; 17830a625fd2SDavid S. Miller } 17840a625fd2SDavid S. Miller 17850a625fd2SDavid S. Miller static void release_global_resources(void) 17860a625fd2SDavid S. 
Miller { 17870a625fd2SDavid S. Miller mutex_lock(&spu_lock); 17880a625fd2SDavid S. Miller if (!--global_ref) { 17890a625fd2SDavid S. Miller kfree(cpu_to_cwq); 17900a625fd2SDavid S. Miller cpu_to_cwq = NULL; 17910a625fd2SDavid S. Miller 17920a625fd2SDavid S. Miller kfree(cpu_to_mau); 17930a625fd2SDavid S. Miller cpu_to_mau = NULL; 17940a625fd2SDavid S. Miller 17950a625fd2SDavid S. Miller queue_cache_destroy(); 17960a625fd2SDavid S. Miller n2_spu_hvapi_unregister(); 17970a625fd2SDavid S. Miller } 17980a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 17990a625fd2SDavid S. Miller } 18000a625fd2SDavid S. Miller 18010a625fd2SDavid S. Miller static struct n2_crypto * __devinit alloc_n2cp(void) 18020a625fd2SDavid S. Miller { 18030a625fd2SDavid S. Miller struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL); 18040a625fd2SDavid S. Miller 18050a625fd2SDavid S. Miller if (np) 18060a625fd2SDavid S. Miller INIT_LIST_HEAD(&np->cwq_list); 18070a625fd2SDavid S. Miller 18080a625fd2SDavid S. Miller return np; 18090a625fd2SDavid S. Miller } 18100a625fd2SDavid S. Miller 18110a625fd2SDavid S. Miller static void free_n2cp(struct n2_crypto *np) 18120a625fd2SDavid S. Miller { 18130a625fd2SDavid S. Miller if (np->cwq_info.ino_table) { 18140a625fd2SDavid S. Miller kfree(np->cwq_info.ino_table); 18150a625fd2SDavid S. Miller np->cwq_info.ino_table = NULL; 18160a625fd2SDavid S. Miller } 18170a625fd2SDavid S. Miller 18180a625fd2SDavid S. Miller kfree(np); 18190a625fd2SDavid S. Miller } 18200a625fd2SDavid S. Miller 18210a625fd2SDavid S. Miller static void __devinit n2_spu_driver_version(void) 18220a625fd2SDavid S. Miller { 18230a625fd2SDavid S. Miller static int n2_spu_version_printed; 18240a625fd2SDavid S. Miller 18250a625fd2SDavid S. Miller if (n2_spu_version_printed++ == 0) 18260a625fd2SDavid S. Miller pr_info("%s", version); 18270a625fd2SDavid S. Miller } 18280a625fd2SDavid S. Miller 18290a625fd2SDavid S. 
Miller static int __devinit n2_crypto_probe(struct of_device *dev, 18300a625fd2SDavid S. Miller const struct of_device_id *match) 18310a625fd2SDavid S. Miller { 18320a625fd2SDavid S. Miller struct mdesc_handle *mdesc; 18330a625fd2SDavid S. Miller const char *full_name; 18340a625fd2SDavid S. Miller struct n2_crypto *np; 18350a625fd2SDavid S. Miller int err; 18360a625fd2SDavid S. Miller 18370a625fd2SDavid S. Miller n2_spu_driver_version(); 18380a625fd2SDavid S. Miller 1839ff6c7341SDavid S. Miller full_name = dev->dev.of_node->full_name; 18400a625fd2SDavid S. Miller pr_info("Found N2CP at %s\n", full_name); 18410a625fd2SDavid S. Miller 18420a625fd2SDavid S. Miller np = alloc_n2cp(); 18430a625fd2SDavid S. Miller if (!np) { 18440a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n", 18450a625fd2SDavid S. Miller full_name); 18460a625fd2SDavid S. Miller return -ENOMEM; 18470a625fd2SDavid S. Miller } 18480a625fd2SDavid S. Miller 18490a625fd2SDavid S. Miller err = grab_global_resources(); 18500a625fd2SDavid S. Miller if (err) { 18510a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to grab " 18520a625fd2SDavid S. Miller "global resources.\n", full_name); 18530a625fd2SDavid S. Miller goto out_free_n2cp; 18540a625fd2SDavid S. Miller } 18550a625fd2SDavid S. Miller 18560a625fd2SDavid S. Miller mdesc = mdesc_grab(); 18570a625fd2SDavid S. Miller 18580a625fd2SDavid S. Miller if (!mdesc) { 18590a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", 18600a625fd2SDavid S. Miller full_name); 18610a625fd2SDavid S. Miller err = -ENODEV; 18620a625fd2SDavid S. Miller goto out_free_global; 18630a625fd2SDavid S. Miller } 18640a625fd2SDavid S. Miller err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp"); 18650a625fd2SDavid S. Miller if (err) { 18660a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", 18670a625fd2SDavid S. Miller full_name); 18680a625fd2SDavid S. Miller mdesc_release(mdesc); 18690a625fd2SDavid S. 
Miller goto out_free_global; 18700a625fd2SDavid S. Miller } 18710a625fd2SDavid S. Miller 18720a625fd2SDavid S. Miller err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list, 18730a625fd2SDavid S. Miller "cwq", HV_NCS_QTYPE_CWQ, cwq_intr, 18740a625fd2SDavid S. Miller cpu_to_cwq); 18750a625fd2SDavid S. Miller mdesc_release(mdesc); 18760a625fd2SDavid S. Miller 18770a625fd2SDavid S. Miller if (err) { 18780a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n", 18790a625fd2SDavid S. Miller full_name); 18800a625fd2SDavid S. Miller goto out_free_global; 18810a625fd2SDavid S. Miller } 18820a625fd2SDavid S. Miller 18830a625fd2SDavid S. Miller err = n2_register_algs(); 18840a625fd2SDavid S. Miller if (err) { 18850a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to register algorithms.\n", 18860a625fd2SDavid S. Miller full_name); 18870a625fd2SDavid S. Miller goto out_free_spu_list; 18880a625fd2SDavid S. Miller } 18890a625fd2SDavid S. Miller 18900a625fd2SDavid S. Miller dev_set_drvdata(&dev->dev, np); 18910a625fd2SDavid S. Miller 18920a625fd2SDavid S. Miller return 0; 18930a625fd2SDavid S. Miller 18940a625fd2SDavid S. Miller out_free_spu_list: 18950a625fd2SDavid S. Miller spu_list_destroy(&np->cwq_list); 18960a625fd2SDavid S. Miller 18970a625fd2SDavid S. Miller out_free_global: 18980a625fd2SDavid S. Miller release_global_resources(); 18990a625fd2SDavid S. Miller 19000a625fd2SDavid S. Miller out_free_n2cp: 19010a625fd2SDavid S. Miller free_n2cp(np); 19020a625fd2SDavid S. Miller 19030a625fd2SDavid S. Miller return err; 19040a625fd2SDavid S. Miller } 19050a625fd2SDavid S. Miller 19060a625fd2SDavid S. Miller static int __devexit n2_crypto_remove(struct of_device *dev) 19070a625fd2SDavid S. Miller { 19080a625fd2SDavid S. Miller struct n2_crypto *np = dev_get_drvdata(&dev->dev); 19090a625fd2SDavid S. Miller 19100a625fd2SDavid S. Miller n2_unregister_algs(); 19110a625fd2SDavid S. Miller 19120a625fd2SDavid S. 
Miller spu_list_destroy(&np->cwq_list); 19130a625fd2SDavid S. Miller 19140a625fd2SDavid S. Miller release_global_resources(); 19150a625fd2SDavid S. Miller 19160a625fd2SDavid S. Miller free_n2cp(np); 19170a625fd2SDavid S. Miller 19180a625fd2SDavid S. Miller return 0; 19190a625fd2SDavid S. Miller } 19200a625fd2SDavid S. Miller 19210a625fd2SDavid S. Miller static struct n2_mau * __devinit alloc_ncp(void) 19220a625fd2SDavid S. Miller { 19230a625fd2SDavid S. Miller struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL); 19240a625fd2SDavid S. Miller 19250a625fd2SDavid S. Miller if (mp) 19260a625fd2SDavid S. Miller INIT_LIST_HEAD(&mp->mau_list); 19270a625fd2SDavid S. Miller 19280a625fd2SDavid S. Miller return mp; 19290a625fd2SDavid S. Miller } 19300a625fd2SDavid S. Miller 19310a625fd2SDavid S. Miller static void free_ncp(struct n2_mau *mp) 19320a625fd2SDavid S. Miller { 19330a625fd2SDavid S. Miller if (mp->mau_info.ino_table) { 19340a625fd2SDavid S. Miller kfree(mp->mau_info.ino_table); 19350a625fd2SDavid S. Miller mp->mau_info.ino_table = NULL; 19360a625fd2SDavid S. Miller } 19370a625fd2SDavid S. Miller 19380a625fd2SDavid S. Miller kfree(mp); 19390a625fd2SDavid S. Miller } 19400a625fd2SDavid S. Miller 19410a625fd2SDavid S. Miller static int __devinit n2_mau_probe(struct of_device *dev, 19420a625fd2SDavid S. Miller const struct of_device_id *match) 19430a625fd2SDavid S. Miller { 19440a625fd2SDavid S. Miller struct mdesc_handle *mdesc; 19450a625fd2SDavid S. Miller const char *full_name; 19460a625fd2SDavid S. Miller struct n2_mau *mp; 19470a625fd2SDavid S. Miller int err; 19480a625fd2SDavid S. Miller 19490a625fd2SDavid S. Miller n2_spu_driver_version(); 19500a625fd2SDavid S. Miller 1951ff6c7341SDavid S. Miller full_name = dev->dev.of_node->full_name; 19520a625fd2SDavid S. Miller pr_info("Found NCP at %s\n", full_name); 19530a625fd2SDavid S. Miller 19540a625fd2SDavid S. Miller mp = alloc_ncp(); 19550a625fd2SDavid S. Miller if (!mp) { 19560a625fd2SDavid S. 
Miller dev_err(&dev->dev, "%s: Unable to allocate ncp.\n", 19570a625fd2SDavid S. Miller full_name); 19580a625fd2SDavid S. Miller return -ENOMEM; 19590a625fd2SDavid S. Miller } 19600a625fd2SDavid S. Miller 19610a625fd2SDavid S. Miller err = grab_global_resources(); 19620a625fd2SDavid S. Miller if (err) { 19630a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to grab " 19640a625fd2SDavid S. Miller "global resources.\n", full_name); 19650a625fd2SDavid S. Miller goto out_free_ncp; 19660a625fd2SDavid S. Miller } 19670a625fd2SDavid S. Miller 19680a625fd2SDavid S. Miller mdesc = mdesc_grab(); 19690a625fd2SDavid S. Miller 19700a625fd2SDavid S. Miller if (!mdesc) { 19710a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to grab MDESC.\n", 19720a625fd2SDavid S. Miller full_name); 19730a625fd2SDavid S. Miller err = -ENODEV; 19740a625fd2SDavid S. Miller goto out_free_global; 19750a625fd2SDavid S. Miller } 19760a625fd2SDavid S. Miller 19770a625fd2SDavid S. Miller err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); 19780a625fd2SDavid S. Miller if (err) { 19790a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n", 19800a625fd2SDavid S. Miller full_name); 19810a625fd2SDavid S. Miller mdesc_release(mdesc); 19820a625fd2SDavid S. Miller goto out_free_global; 19830a625fd2SDavid S. Miller } 19840a625fd2SDavid S. Miller 19850a625fd2SDavid S. Miller err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list, 19860a625fd2SDavid S. Miller "mau", HV_NCS_QTYPE_MAU, mau_intr, 19870a625fd2SDavid S. Miller cpu_to_mau); 19880a625fd2SDavid S. Miller mdesc_release(mdesc); 19890a625fd2SDavid S. Miller 19900a625fd2SDavid S. Miller if (err) { 19910a625fd2SDavid S. Miller dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n", 19920a625fd2SDavid S. Miller full_name); 19930a625fd2SDavid S. Miller goto out_free_global; 19940a625fd2SDavid S. Miller } 19950a625fd2SDavid S. Miller 19960a625fd2SDavid S. Miller dev_set_drvdata(&dev->dev, mp); 19970a625fd2SDavid S. 
Miller 19980a625fd2SDavid S. Miller return 0; 19990a625fd2SDavid S. Miller 20000a625fd2SDavid S. Miller out_free_global: 20010a625fd2SDavid S. Miller release_global_resources(); 20020a625fd2SDavid S. Miller 20030a625fd2SDavid S. Miller out_free_ncp: 20040a625fd2SDavid S. Miller free_ncp(mp); 20050a625fd2SDavid S. Miller 20060a625fd2SDavid S. Miller return err; 20070a625fd2SDavid S. Miller } 20080a625fd2SDavid S. Miller 20090a625fd2SDavid S. Miller static int __devexit n2_mau_remove(struct of_device *dev) 20100a625fd2SDavid S. Miller { 20110a625fd2SDavid S. Miller struct n2_mau *mp = dev_get_drvdata(&dev->dev); 20120a625fd2SDavid S. Miller 20130a625fd2SDavid S. Miller spu_list_destroy(&mp->mau_list); 20140a625fd2SDavid S. Miller 20150a625fd2SDavid S. Miller release_global_resources(); 20160a625fd2SDavid S. Miller 20170a625fd2SDavid S. Miller free_ncp(mp); 20180a625fd2SDavid S. Miller 20190a625fd2SDavid S. Miller return 0; 20200a625fd2SDavid S. Miller } 20210a625fd2SDavid S. Miller 20220a625fd2SDavid S. Miller static struct of_device_id n2_crypto_match[] = { 20230a625fd2SDavid S. Miller { 20240a625fd2SDavid S. Miller .name = "n2cp", 20250a625fd2SDavid S. Miller .compatible = "SUNW,n2-cwq", 20260a625fd2SDavid S. Miller }, 20270a625fd2SDavid S. Miller { 20280a625fd2SDavid S. Miller .name = "n2cp", 20290a625fd2SDavid S. Miller .compatible = "SUNW,vf-cwq", 20300a625fd2SDavid S. Miller }, 20310a625fd2SDavid S. Miller {}, 20320a625fd2SDavid S. Miller }; 20330a625fd2SDavid S. Miller 20340a625fd2SDavid S. Miller MODULE_DEVICE_TABLE(of, n2_crypto_match); 20350a625fd2SDavid S. Miller 20360a625fd2SDavid S. Miller static struct of_platform_driver n2_crypto_driver = { 2037ff6c7341SDavid S. Miller .driver = { 20380a625fd2SDavid S. Miller .name = "n2cp", 2039ff6c7341SDavid S. Miller .owner = THIS_MODULE, 2040ff6c7341SDavid S. Miller .of_match_table = n2_crypto_match, 2041ff6c7341SDavid S. Miller }, 20420a625fd2SDavid S. Miller .probe = n2_crypto_probe, 20430a625fd2SDavid S. 
Miller .remove = __devexit_p(n2_crypto_remove), 20440a625fd2SDavid S. Miller }; 20450a625fd2SDavid S. Miller 20460a625fd2SDavid S. Miller static struct of_device_id n2_mau_match[] = { 20470a625fd2SDavid S. Miller { 20480a625fd2SDavid S. Miller .name = "ncp", 20490a625fd2SDavid S. Miller .compatible = "SUNW,n2-mau", 20500a625fd2SDavid S. Miller }, 20510a625fd2SDavid S. Miller { 20520a625fd2SDavid S. Miller .name = "ncp", 20530a625fd2SDavid S. Miller .compatible = "SUNW,vf-mau", 20540a625fd2SDavid S. Miller }, 20550a625fd2SDavid S. Miller {}, 20560a625fd2SDavid S. Miller }; 20570a625fd2SDavid S. Miller 20580a625fd2SDavid S. Miller MODULE_DEVICE_TABLE(of, n2_mau_match); 20590a625fd2SDavid S. Miller 20600a625fd2SDavid S. Miller static struct of_platform_driver n2_mau_driver = { 2061ff6c7341SDavid S. Miller .driver = { 20620a625fd2SDavid S. Miller .name = "ncp", 2063ff6c7341SDavid S. Miller .owner = THIS_MODULE, 2064ff6c7341SDavid S. Miller .of_match_table = n2_mau_match, 2065ff6c7341SDavid S. Miller }, 20660a625fd2SDavid S. Miller .probe = n2_mau_probe, 20670a625fd2SDavid S. Miller .remove = __devexit_p(n2_mau_remove), 20680a625fd2SDavid S. Miller }; 20690a625fd2SDavid S. Miller 20700a625fd2SDavid S. Miller static int __init n2_init(void) 20710a625fd2SDavid S. Miller { 20720a625fd2SDavid S. Miller int err = of_register_driver(&n2_crypto_driver, &of_bus_type); 20730a625fd2SDavid S. Miller 20740a625fd2SDavid S. Miller if (!err) { 20750a625fd2SDavid S. Miller err = of_register_driver(&n2_mau_driver, &of_bus_type); 20760a625fd2SDavid S. Miller if (err) 20770a625fd2SDavid S. Miller of_unregister_driver(&n2_crypto_driver); 20780a625fd2SDavid S. Miller } 20790a625fd2SDavid S. Miller return err; 20800a625fd2SDavid S. Miller } 20810a625fd2SDavid S. Miller 20820a625fd2SDavid S. Miller static void __exit n2_exit(void) 20830a625fd2SDavid S. Miller { 20840a625fd2SDavid S. Miller of_unregister_driver(&n2_mau_driver); 20850a625fd2SDavid S. 
Miller of_unregister_driver(&n2_crypto_driver); 20860a625fd2SDavid S. Miller } 20870a625fd2SDavid S. Miller 20880a625fd2SDavid S. Miller module_init(n2_init); 20890a625fd2SDavid S. Miller module_exit(n2_exit); 2090