109c434b8SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 20a625fd2SDavid S. Miller /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. 30a625fd2SDavid S. Miller * 4eb7caf35SDavid S. Miller * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net> 50a625fd2SDavid S. Miller */ 60a625fd2SDavid S. Miller 70a625fd2SDavid S. Miller #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 80a625fd2SDavid S. Miller 90a625fd2SDavid S. Miller #include <linux/kernel.h> 100a625fd2SDavid S. Miller #include <linux/module.h> 110a625fd2SDavid S. Miller #include <linux/of.h> 120a625fd2SDavid S. Miller #include <linux/of_device.h> 130a625fd2SDavid S. Miller #include <linux/cpumask.h> 140a625fd2SDavid S. Miller #include <linux/slab.h> 150a625fd2SDavid S. Miller #include <linux/interrupt.h> 160a625fd2SDavid S. Miller #include <linux/crypto.h> 170a625fd2SDavid S. Miller #include <crypto/md5.h> 180a625fd2SDavid S. Miller #include <crypto/sha.h> 190a625fd2SDavid S. Miller #include <crypto/aes.h> 200a625fd2SDavid S. Miller #include <crypto/des.h> 210a625fd2SDavid S. Miller #include <linux/mutex.h> 220a625fd2SDavid S. Miller #include <linux/delay.h> 230a625fd2SDavid S. Miller #include <linux/sched.h> 240a625fd2SDavid S. Miller 250a625fd2SDavid S. Miller #include <crypto/internal/hash.h> 260a625fd2SDavid S. Miller #include <crypto/scatterwalk.h> 270a625fd2SDavid S. Miller #include <crypto/algapi.h> 280a625fd2SDavid S. Miller 290a625fd2SDavid S. Miller #include <asm/hypervisor.h> 300a625fd2SDavid S. Miller #include <asm/mdesc.h> 310a625fd2SDavid S. Miller 320a625fd2SDavid S. Miller #include "n2_core.h" 330a625fd2SDavid S. Miller 340a625fd2SDavid S. Miller #define DRV_MODULE_NAME "n2_crypto" 35eb7caf35SDavid S. Miller #define DRV_MODULE_VERSION "0.2" 36eb7caf35SDavid S. Miller #define DRV_MODULE_RELDATE "July 28, 2011" 370a625fd2SDavid S. Miller 3850826874SLABBE Corentin static const char version[] = 390a625fd2SDavid S. 
Miller DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 400a625fd2SDavid S. Miller 410a625fd2SDavid S. Miller MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 420a625fd2SDavid S. Miller MODULE_DESCRIPTION("Niagara2 Crypto driver"); 430a625fd2SDavid S. Miller MODULE_LICENSE("GPL"); 440a625fd2SDavid S. Miller MODULE_VERSION(DRV_MODULE_VERSION); 450a625fd2SDavid S. Miller 4610803624SDavid S. Miller #define N2_CRA_PRIORITY 200 470a625fd2SDavid S. Miller 480a625fd2SDavid S. Miller static DEFINE_MUTEX(spu_lock); 490a625fd2SDavid S. Miller 500a625fd2SDavid S. Miller struct spu_queue { 510a625fd2SDavid S. Miller cpumask_t sharing; 520a625fd2SDavid S. Miller unsigned long qhandle; 530a625fd2SDavid S. Miller 540a625fd2SDavid S. Miller spinlock_t lock; 550a625fd2SDavid S. Miller u8 q_type; 560a625fd2SDavid S. Miller void *q; 570a625fd2SDavid S. Miller unsigned long head; 580a625fd2SDavid S. Miller unsigned long tail; 590a625fd2SDavid S. Miller struct list_head jobs; 600a625fd2SDavid S. Miller 610a625fd2SDavid S. Miller unsigned long devino; 620a625fd2SDavid S. Miller 630a625fd2SDavid S. Miller char irq_name[32]; 640a625fd2SDavid S. Miller unsigned int irq; 650a625fd2SDavid S. Miller 660a625fd2SDavid S. Miller struct list_head list; 670a625fd2SDavid S. Miller }; 680a625fd2SDavid S. Miller 6973810a06SThomas Gleixner struct spu_qreg { 7073810a06SThomas Gleixner struct spu_queue *queue; 7173810a06SThomas Gleixner unsigned long type; 7273810a06SThomas Gleixner }; 7373810a06SThomas Gleixner 740a625fd2SDavid S. Miller static struct spu_queue **cpu_to_cwq; 750a625fd2SDavid S. Miller static struct spu_queue **cpu_to_mau; 760a625fd2SDavid S. Miller 770a625fd2SDavid S. Miller static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off) 780a625fd2SDavid S. Miller { 790a625fd2SDavid S. Miller if (q->q_type == HV_NCS_QTYPE_MAU) { 800a625fd2SDavid S. Miller off += MAU_ENTRY_SIZE; 810a625fd2SDavid S. 
Miller if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES)) 820a625fd2SDavid S. Miller off = 0; 830a625fd2SDavid S. Miller } else { 840a625fd2SDavid S. Miller off += CWQ_ENTRY_SIZE; 850a625fd2SDavid S. Miller if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES)) 860a625fd2SDavid S. Miller off = 0; 870a625fd2SDavid S. Miller } 880a625fd2SDavid S. Miller return off; 890a625fd2SDavid S. Miller } 900a625fd2SDavid S. Miller 910a625fd2SDavid S. Miller struct n2_request_common { 920a625fd2SDavid S. Miller struct list_head entry; 930a625fd2SDavid S. Miller unsigned int offset; 940a625fd2SDavid S. Miller }; 950a625fd2SDavid S. Miller #define OFFSET_NOT_RUNNING (~(unsigned int)0) 960a625fd2SDavid S. Miller 970a625fd2SDavid S. Miller /* An async job request records the final tail value it used in 980a625fd2SDavid S. Miller * n2_request_common->offset, test to see if that offset is in 990a625fd2SDavid S. Miller * the range old_head, new_head, inclusive. 1000a625fd2SDavid S. Miller */ 1010a625fd2SDavid S. Miller static inline bool job_finished(struct spu_queue *q, unsigned int offset, 1020a625fd2SDavid S. Miller unsigned long old_head, unsigned long new_head) 1030a625fd2SDavid S. Miller { 1040a625fd2SDavid S. Miller if (old_head <= new_head) { 1050a625fd2SDavid S. Miller if (offset > old_head && offset <= new_head) 1060a625fd2SDavid S. Miller return true; 1070a625fd2SDavid S. Miller } else { 1080a625fd2SDavid S. Miller if (offset > old_head || offset <= new_head) 1090a625fd2SDavid S. Miller return true; 1100a625fd2SDavid S. Miller } 1110a625fd2SDavid S. Miller return false; 1120a625fd2SDavid S. Miller } 1130a625fd2SDavid S. Miller 1140a625fd2SDavid S. Miller /* When the HEAD marker is unequal to the actual HEAD, we get 1150a625fd2SDavid S. Miller * a virtual device INO interrupt. We should process the 1160a625fd2SDavid S. Miller * completed CWQ entries and adjust the HEAD marker to clear 1170a625fd2SDavid S. Miller * the IRQ. 1180a625fd2SDavid S. Miller */ 1190a625fd2SDavid S. 
Miller static irqreturn_t cwq_intr(int irq, void *dev_id) 1200a625fd2SDavid S. Miller { 1210a625fd2SDavid S. Miller unsigned long off, new_head, hv_ret; 1220a625fd2SDavid S. Miller struct spu_queue *q = dev_id; 1230a625fd2SDavid S. Miller 1240a625fd2SDavid S. Miller pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n", 1250a625fd2SDavid S. Miller smp_processor_id(), q->qhandle); 1260a625fd2SDavid S. Miller 1270a625fd2SDavid S. Miller spin_lock(&q->lock); 1280a625fd2SDavid S. Miller 1290a625fd2SDavid S. Miller hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head); 1300a625fd2SDavid S. Miller 1310a625fd2SDavid S. Miller pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n", 1320a625fd2SDavid S. Miller smp_processor_id(), new_head, hv_ret); 1330a625fd2SDavid S. Miller 1340a625fd2SDavid S. Miller for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { 1350a625fd2SDavid S. Miller /* XXX ... XXX */ 1360a625fd2SDavid S. Miller } 1370a625fd2SDavid S. Miller 1380a625fd2SDavid S. Miller hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head); 1390a625fd2SDavid S. Miller if (hv_ret == HV_EOK) 1400a625fd2SDavid S. Miller q->head = new_head; 1410a625fd2SDavid S. Miller 1420a625fd2SDavid S. Miller spin_unlock(&q->lock); 1430a625fd2SDavid S. Miller 1440a625fd2SDavid S. Miller return IRQ_HANDLED; 1450a625fd2SDavid S. Miller } 1460a625fd2SDavid S. Miller 1470a625fd2SDavid S. Miller static irqreturn_t mau_intr(int irq, void *dev_id) 1480a625fd2SDavid S. Miller { 1490a625fd2SDavid S. Miller struct spu_queue *q = dev_id; 1500a625fd2SDavid S. Miller unsigned long head, hv_ret; 1510a625fd2SDavid S. Miller 1520a625fd2SDavid S. Miller spin_lock(&q->lock); 1530a625fd2SDavid S. Miller 1540a625fd2SDavid S. Miller pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n", 1550a625fd2SDavid S. Miller smp_processor_id(), q->qhandle); 1560a625fd2SDavid S. Miller 1570a625fd2SDavid S. Miller hv_ret = sun4v_ncs_gethead(q->qhandle, &head); 1580a625fd2SDavid S. Miller 1590a625fd2SDavid S. 
Miller pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n", 1600a625fd2SDavid S. Miller smp_processor_id(), head, hv_ret); 1610a625fd2SDavid S. Miller 1620a625fd2SDavid S. Miller sun4v_ncs_sethead_marker(q->qhandle, head); 1630a625fd2SDavid S. Miller 1640a625fd2SDavid S. Miller spin_unlock(&q->lock); 1650a625fd2SDavid S. Miller 1660a625fd2SDavid S. Miller return IRQ_HANDLED; 1670a625fd2SDavid S. Miller } 1680a625fd2SDavid S. Miller 1690a625fd2SDavid S. Miller static void *spu_queue_next(struct spu_queue *q, void *cur) 1700a625fd2SDavid S. Miller { 1710a625fd2SDavid S. Miller return q->q + spu_next_offset(q, cur - q->q); 1720a625fd2SDavid S. Miller } 1730a625fd2SDavid S. Miller 1740a625fd2SDavid S. Miller static int spu_queue_num_free(struct spu_queue *q) 1750a625fd2SDavid S. Miller { 1760a625fd2SDavid S. Miller unsigned long head = q->head; 1770a625fd2SDavid S. Miller unsigned long tail = q->tail; 1780a625fd2SDavid S. Miller unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES); 1790a625fd2SDavid S. Miller unsigned long diff; 1800a625fd2SDavid S. Miller 1810a625fd2SDavid S. Miller if (head > tail) 1820a625fd2SDavid S. Miller diff = head - tail; 1830a625fd2SDavid S. Miller else 1840a625fd2SDavid S. Miller diff = (end - tail) + head; 1850a625fd2SDavid S. Miller 1860a625fd2SDavid S. Miller return (diff / CWQ_ENTRY_SIZE) - 1; 1870a625fd2SDavid S. Miller } 1880a625fd2SDavid S. Miller 1890a625fd2SDavid S. Miller static void *spu_queue_alloc(struct spu_queue *q, int num_entries) 1900a625fd2SDavid S. Miller { 1910a625fd2SDavid S. Miller int avail = spu_queue_num_free(q); 1920a625fd2SDavid S. Miller 1930a625fd2SDavid S. Miller if (avail >= num_entries) 1940a625fd2SDavid S. Miller return q->q + q->tail; 1950a625fd2SDavid S. Miller 1960a625fd2SDavid S. Miller return NULL; 1970a625fd2SDavid S. Miller } 1980a625fd2SDavid S. Miller 1990a625fd2SDavid S. Miller static unsigned long spu_queue_submit(struct spu_queue *q, void *last) 2000a625fd2SDavid S. Miller { 2010a625fd2SDavid S. 
Miller unsigned long hv_ret, new_tail; 2020a625fd2SDavid S. Miller 2030a625fd2SDavid S. Miller new_tail = spu_next_offset(q, last - q->q); 2040a625fd2SDavid S. Miller 2050a625fd2SDavid S. Miller hv_ret = sun4v_ncs_settail(q->qhandle, new_tail); 2060a625fd2SDavid S. Miller if (hv_ret == HV_EOK) 2070a625fd2SDavid S. Miller q->tail = new_tail; 2080a625fd2SDavid S. Miller return hv_ret; 2090a625fd2SDavid S. Miller } 2100a625fd2SDavid S. Miller 2110a625fd2SDavid S. Miller static u64 control_word_base(unsigned int len, unsigned int hmac_key_len, 2120a625fd2SDavid S. Miller int enc_type, int auth_type, 2130a625fd2SDavid S. Miller unsigned int hash_len, 2140a625fd2SDavid S. Miller bool sfas, bool sob, bool eob, bool encrypt, 2150a625fd2SDavid S. Miller int opcode) 2160a625fd2SDavid S. Miller { 2170a625fd2SDavid S. Miller u64 word = (len - 1) & CONTROL_LEN; 2180a625fd2SDavid S. Miller 2190a625fd2SDavid S. Miller word |= ((u64) opcode << CONTROL_OPCODE_SHIFT); 2200a625fd2SDavid S. Miller word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT); 2210a625fd2SDavid S. Miller word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT); 2220a625fd2SDavid S. Miller if (sfas) 2230a625fd2SDavid S. Miller word |= CONTROL_STORE_FINAL_AUTH_STATE; 2240a625fd2SDavid S. Miller if (sob) 2250a625fd2SDavid S. Miller word |= CONTROL_START_OF_BLOCK; 2260a625fd2SDavid S. Miller if (eob) 2270a625fd2SDavid S. Miller word |= CONTROL_END_OF_BLOCK; 2280a625fd2SDavid S. Miller if (encrypt) 2290a625fd2SDavid S. Miller word |= CONTROL_ENCRYPT; 2300a625fd2SDavid S. Miller if (hmac_key_len) 2310a625fd2SDavid S. Miller word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT; 2320a625fd2SDavid S. Miller if (hash_len) 2330a625fd2SDavid S. Miller word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT; 2340a625fd2SDavid S. Miller 2350a625fd2SDavid S. Miller return word; 2360a625fd2SDavid S. Miller } 2370a625fd2SDavid S. Miller 2380a625fd2SDavid S. Miller #if 0 2390a625fd2SDavid S. 
Miller static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) 2400a625fd2SDavid S. Miller { 2410a625fd2SDavid S. Miller if (this_len >= 64 || 2420a625fd2SDavid S. Miller qp->head != qp->tail) 2430a625fd2SDavid S. Miller return true; 2440a625fd2SDavid S. Miller return false; 2450a625fd2SDavid S. Miller } 2460a625fd2SDavid S. Miller #endif 2470a625fd2SDavid S. Miller 2483a2c0346SDavid S. Miller struct n2_ahash_alg { 2493a2c0346SDavid S. Miller struct list_head entry; 2508054b800SLABBE Corentin const u8 *hash_zero; 2513a2c0346SDavid S. Miller const u32 *hash_init; 2523a2c0346SDavid S. Miller u8 hw_op_hashsz; 2533a2c0346SDavid S. Miller u8 digest_size; 2543a2c0346SDavid S. Miller u8 auth_type; 255dc4ccfd1SDavid S. Miller u8 hmac_type; 2563a2c0346SDavid S. Miller struct ahash_alg alg; 2573a2c0346SDavid S. Miller }; 2583a2c0346SDavid S. Miller 2593a2c0346SDavid S. Miller static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm) 2603a2c0346SDavid S. Miller { 2613a2c0346SDavid S. Miller struct crypto_alg *alg = tfm->__crt_alg; 2623a2c0346SDavid S. Miller struct ahash_alg *ahash_alg; 2633a2c0346SDavid S. Miller 2643a2c0346SDavid S. Miller ahash_alg = container_of(alg, struct ahash_alg, halg.base); 2653a2c0346SDavid S. Miller 2663a2c0346SDavid S. Miller return container_of(ahash_alg, struct n2_ahash_alg, alg); 2673a2c0346SDavid S. Miller } 2683a2c0346SDavid S. Miller 269dc4ccfd1SDavid S. Miller struct n2_hmac_alg { 270dc4ccfd1SDavid S. Miller const char *child_alg; 271dc4ccfd1SDavid S. Miller struct n2_ahash_alg derived; 272dc4ccfd1SDavid S. Miller }; 273dc4ccfd1SDavid S. Miller 274dc4ccfd1SDavid S. Miller static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm) 275dc4ccfd1SDavid S. Miller { 276dc4ccfd1SDavid S. Miller struct crypto_alg *alg = tfm->__crt_alg; 277dc4ccfd1SDavid S. Miller struct ahash_alg *ahash_alg; 278dc4ccfd1SDavid S. Miller 279dc4ccfd1SDavid S. 
Miller ahash_alg = container_of(alg, struct ahash_alg, halg.base); 280dc4ccfd1SDavid S. Miller 281dc4ccfd1SDavid S. Miller return container_of(ahash_alg, struct n2_hmac_alg, derived.alg); 282dc4ccfd1SDavid S. Miller } 283dc4ccfd1SDavid S. Miller 2840a625fd2SDavid S. Miller struct n2_hash_ctx { 285c9aa55e5SDavid S. Miller struct crypto_ahash *fallback_tfm; 286c9aa55e5SDavid S. Miller }; 2870a625fd2SDavid S. Miller 288dc4ccfd1SDavid S. Miller #define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */ 289dc4ccfd1SDavid S. Miller 290dc4ccfd1SDavid S. Miller struct n2_hmac_ctx { 291dc4ccfd1SDavid S. Miller struct n2_hash_ctx base; 292dc4ccfd1SDavid S. Miller 293dc4ccfd1SDavid S. Miller struct crypto_shash *child_shash; 294dc4ccfd1SDavid S. Miller 295dc4ccfd1SDavid S. Miller int hash_key_len; 296dc4ccfd1SDavid S. Miller unsigned char hash_key[N2_HASH_KEY_MAX]; 297dc4ccfd1SDavid S. Miller }; 298dc4ccfd1SDavid S. Miller 299c9aa55e5SDavid S. Miller struct n2_hash_req_ctx { 3000a625fd2SDavid S. Miller union { 3010a625fd2SDavid S. Miller struct md5_state md5; 3020a625fd2SDavid S. Miller struct sha1_state sha1; 3030a625fd2SDavid S. Miller struct sha256_state sha256; 3040a625fd2SDavid S. Miller } u; 3050a625fd2SDavid S. Miller 306c9aa55e5SDavid S. Miller struct ahash_request fallback_req; 3070a625fd2SDavid S. Miller }; 3080a625fd2SDavid S. Miller 3090a625fd2SDavid S. Miller static int n2_hash_async_init(struct ahash_request *req) 3100a625fd2SDavid S. Miller { 311c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 3120a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3130a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3140a625fd2SDavid S. Miller 315c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 316c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 3170a625fd2SDavid S. Miller 318c9aa55e5SDavid S. 
Miller return crypto_ahash_init(&rctx->fallback_req); 3190a625fd2SDavid S. Miller } 3200a625fd2SDavid S. Miller 3210a625fd2SDavid S. Miller static int n2_hash_async_update(struct ahash_request *req) 3220a625fd2SDavid S. Miller { 323c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 3240a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3250a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3260a625fd2SDavid S. Miller 327c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 328c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 329c9aa55e5SDavid S. Miller rctx->fallback_req.nbytes = req->nbytes; 330c9aa55e5SDavid S. Miller rctx->fallback_req.src = req->src; 3310a625fd2SDavid S. Miller 332c9aa55e5SDavid S. Miller return crypto_ahash_update(&rctx->fallback_req); 3330a625fd2SDavid S. Miller } 3340a625fd2SDavid S. Miller 3350a625fd2SDavid S. Miller static int n2_hash_async_final(struct ahash_request *req) 3360a625fd2SDavid S. Miller { 337c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 3380a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3390a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3400a625fd2SDavid S. Miller 341c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 342c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 343c9aa55e5SDavid S. Miller rctx->fallback_req.result = req->result; 3440a625fd2SDavid S. Miller 345c9aa55e5SDavid S. Miller return crypto_ahash_final(&rctx->fallback_req); 3460a625fd2SDavid S. Miller } 3470a625fd2SDavid S. Miller 3480a625fd2SDavid S. Miller static int n2_hash_async_finup(struct ahash_request *req) 3490a625fd2SDavid S. Miller { 350c9aa55e5SDavid S. 
Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 3510a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 3520a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 3530a625fd2SDavid S. Miller 354c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 355c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 356c9aa55e5SDavid S. Miller rctx->fallback_req.nbytes = req->nbytes; 357c9aa55e5SDavid S. Miller rctx->fallback_req.src = req->src; 358c9aa55e5SDavid S. Miller rctx->fallback_req.result = req->result; 3590a625fd2SDavid S. Miller 360c9aa55e5SDavid S. Miller return crypto_ahash_finup(&rctx->fallback_req); 3610a625fd2SDavid S. Miller } 3620a625fd2SDavid S. Miller 363378fe6fbSKamil Konieczny static int n2_hash_async_noimport(struct ahash_request *req, const void *in) 364378fe6fbSKamil Konieczny { 365378fe6fbSKamil Konieczny return -ENOSYS; 366378fe6fbSKamil Konieczny } 367378fe6fbSKamil Konieczny 368378fe6fbSKamil Konieczny static int n2_hash_async_noexport(struct ahash_request *req, void *out) 369378fe6fbSKamil Konieczny { 370378fe6fbSKamil Konieczny return -ENOSYS; 371378fe6fbSKamil Konieczny } 372378fe6fbSKamil Konieczny 3730a625fd2SDavid S. Miller static int n2_hash_cra_init(struct crypto_tfm *tfm) 3740a625fd2SDavid S. Miller { 3755837af01SMarek Vasut const char *fallback_driver_name = crypto_tfm_alg_name(tfm); 3760a625fd2SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 3770a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 3780a625fd2SDavid S. Miller struct crypto_ahash *fallback_tfm; 3790a625fd2SDavid S. Miller int err; 3800a625fd2SDavid S. Miller 3810a625fd2SDavid S. Miller fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, 3820a625fd2SDavid S. Miller CRYPTO_ALG_NEED_FALLBACK); 3830a625fd2SDavid S. Miller if (IS_ERR(fallback_tfm)) { 3840a625fd2SDavid S. 
Miller pr_warning("Fallback driver '%s' could not be loaded!\n", 3850a625fd2SDavid S. Miller fallback_driver_name); 3860a625fd2SDavid S. Miller err = PTR_ERR(fallback_tfm); 3870a625fd2SDavid S. Miller goto out; 3880a625fd2SDavid S. Miller } 3890a625fd2SDavid S. Miller 390c9aa55e5SDavid S. Miller crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + 391c9aa55e5SDavid S. Miller crypto_ahash_reqsize(fallback_tfm))); 392c9aa55e5SDavid S. Miller 393c9aa55e5SDavid S. Miller ctx->fallback_tfm = fallback_tfm; 3940a625fd2SDavid S. Miller return 0; 3950a625fd2SDavid S. Miller 3960a625fd2SDavid S. Miller out: 3970a625fd2SDavid S. Miller return err; 3980a625fd2SDavid S. Miller } 3990a625fd2SDavid S. Miller 4000a625fd2SDavid S. Miller static void n2_hash_cra_exit(struct crypto_tfm *tfm) 4010a625fd2SDavid S. Miller { 4020a625fd2SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 4030a625fd2SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 4040a625fd2SDavid S. Miller 405c9aa55e5SDavid S. Miller crypto_free_ahash(ctx->fallback_tfm); 4060a625fd2SDavid S. Miller } 4070a625fd2SDavid S. Miller 408dc4ccfd1SDavid S. Miller static int n2_hmac_cra_init(struct crypto_tfm *tfm) 409dc4ccfd1SDavid S. Miller { 4105837af01SMarek Vasut const char *fallback_driver_name = crypto_tfm_alg_name(tfm); 411dc4ccfd1SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 412dc4ccfd1SDavid S. Miller struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); 413dc4ccfd1SDavid S. Miller struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm); 414dc4ccfd1SDavid S. Miller struct crypto_ahash *fallback_tfm; 415dc4ccfd1SDavid S. Miller struct crypto_shash *child_shash; 416dc4ccfd1SDavid S. Miller int err; 417dc4ccfd1SDavid S. Miller 418dc4ccfd1SDavid S. Miller fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, 419dc4ccfd1SDavid S. Miller CRYPTO_ALG_NEED_FALLBACK); 420dc4ccfd1SDavid S. Miller if (IS_ERR(fallback_tfm)) { 421dc4ccfd1SDavid S. 
Miller pr_warning("Fallback driver '%s' could not be loaded!\n", 422dc4ccfd1SDavid S. Miller fallback_driver_name); 423dc4ccfd1SDavid S. Miller err = PTR_ERR(fallback_tfm); 424dc4ccfd1SDavid S. Miller goto out; 425dc4ccfd1SDavid S. Miller } 426dc4ccfd1SDavid S. Miller 427dc4ccfd1SDavid S. Miller child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0); 428dc4ccfd1SDavid S. Miller if (IS_ERR(child_shash)) { 429dc4ccfd1SDavid S. Miller pr_warning("Child shash '%s' could not be loaded!\n", 430dc4ccfd1SDavid S. Miller n2alg->child_alg); 431dc4ccfd1SDavid S. Miller err = PTR_ERR(child_shash); 432dc4ccfd1SDavid S. Miller goto out_free_fallback; 433dc4ccfd1SDavid S. Miller } 434dc4ccfd1SDavid S. Miller 435dc4ccfd1SDavid S. Miller crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + 436dc4ccfd1SDavid S. Miller crypto_ahash_reqsize(fallback_tfm))); 437dc4ccfd1SDavid S. Miller 438dc4ccfd1SDavid S. Miller ctx->child_shash = child_shash; 439dc4ccfd1SDavid S. Miller ctx->base.fallback_tfm = fallback_tfm; 440dc4ccfd1SDavid S. Miller return 0; 441dc4ccfd1SDavid S. Miller 442dc4ccfd1SDavid S. Miller out_free_fallback: 443dc4ccfd1SDavid S. Miller crypto_free_ahash(fallback_tfm); 444dc4ccfd1SDavid S. Miller 445dc4ccfd1SDavid S. Miller out: 446dc4ccfd1SDavid S. Miller return err; 447dc4ccfd1SDavid S. Miller } 448dc4ccfd1SDavid S. Miller 449dc4ccfd1SDavid S. Miller static void n2_hmac_cra_exit(struct crypto_tfm *tfm) 450dc4ccfd1SDavid S. Miller { 451dc4ccfd1SDavid S. Miller struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 452dc4ccfd1SDavid S. Miller struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); 453dc4ccfd1SDavid S. Miller 454dc4ccfd1SDavid S. Miller crypto_free_ahash(ctx->base.fallback_tfm); 455dc4ccfd1SDavid S. Miller crypto_free_shash(ctx->child_shash); 456dc4ccfd1SDavid S. Miller } 457dc4ccfd1SDavid S. Miller 458dc4ccfd1SDavid S. Miller static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, 459dc4ccfd1SDavid S. 
Miller unsigned int keylen) 460dc4ccfd1SDavid S. Miller { 461dc4ccfd1SDavid S. Miller struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); 462dc4ccfd1SDavid S. Miller struct crypto_shash *child_shash = ctx->child_shash; 463dc4ccfd1SDavid S. Miller struct crypto_ahash *fallback_tfm; 464ce1f3e47SBehan Webster SHASH_DESC_ON_STACK(shash, child_shash); 465dc4ccfd1SDavid S. Miller int err, bs, ds; 466dc4ccfd1SDavid S. Miller 467dc4ccfd1SDavid S. Miller fallback_tfm = ctx->base.fallback_tfm; 468dc4ccfd1SDavid S. Miller err = crypto_ahash_setkey(fallback_tfm, key, keylen); 469dc4ccfd1SDavid S. Miller if (err) 470dc4ccfd1SDavid S. Miller return err; 471dc4ccfd1SDavid S. Miller 472ce1f3e47SBehan Webster shash->tfm = child_shash; 473dc4ccfd1SDavid S. Miller 474dc4ccfd1SDavid S. Miller bs = crypto_shash_blocksize(child_shash); 475dc4ccfd1SDavid S. Miller ds = crypto_shash_digestsize(child_shash); 476dc4ccfd1SDavid S. Miller BUG_ON(ds > N2_HASH_KEY_MAX); 477dc4ccfd1SDavid S. Miller if (keylen > bs) { 478ce1f3e47SBehan Webster err = crypto_shash_digest(shash, key, keylen, 479dc4ccfd1SDavid S. Miller ctx->hash_key); 480dc4ccfd1SDavid S. Miller if (err) 481dc4ccfd1SDavid S. Miller return err; 482dc4ccfd1SDavid S. Miller keylen = ds; 483dc4ccfd1SDavid S. Miller } else if (keylen <= N2_HASH_KEY_MAX) 484dc4ccfd1SDavid S. Miller memcpy(ctx->hash_key, key, keylen); 485dc4ccfd1SDavid S. Miller 486dc4ccfd1SDavid S. Miller ctx->hash_key_len = keylen; 487dc4ccfd1SDavid S. Miller 488dc4ccfd1SDavid S. Miller return err; 489dc4ccfd1SDavid S. Miller } 490dc4ccfd1SDavid S. Miller 4910a625fd2SDavid S. Miller static unsigned long wait_for_tail(struct spu_queue *qp) 4920a625fd2SDavid S. Miller { 4930a625fd2SDavid S. Miller unsigned long head, hv_ret; 4940a625fd2SDavid S. Miller 4950a625fd2SDavid S. Miller do { 4960a625fd2SDavid S. Miller hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); 4970a625fd2SDavid S. Miller if (hv_ret != HV_EOK) { 4980a625fd2SDavid S. 
Miller pr_err("Hypervisor error on gethead\n"); 4990a625fd2SDavid S. Miller break; 5000a625fd2SDavid S. Miller } 5010a625fd2SDavid S. Miller if (head == qp->tail) { 5020a625fd2SDavid S. Miller qp->head = head; 5030a625fd2SDavid S. Miller break; 5040a625fd2SDavid S. Miller } 5050a625fd2SDavid S. Miller } while (1); 5060a625fd2SDavid S. Miller return hv_ret; 5070a625fd2SDavid S. Miller } 5080a625fd2SDavid S. Miller 5090a625fd2SDavid S. Miller static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, 5100a625fd2SDavid S. Miller struct cwq_initial_entry *ent) 5110a625fd2SDavid S. Miller { 5120a625fd2SDavid S. Miller unsigned long hv_ret = spu_queue_submit(qp, ent); 5130a625fd2SDavid S. Miller 5140a625fd2SDavid S. Miller if (hv_ret == HV_EOK) 5150a625fd2SDavid S. Miller hv_ret = wait_for_tail(qp); 5160a625fd2SDavid S. Miller 5170a625fd2SDavid S. Miller return hv_ret; 5180a625fd2SDavid S. Miller } 5190a625fd2SDavid S. Miller 5203a2c0346SDavid S. Miller static int n2_do_async_digest(struct ahash_request *req, 5210a625fd2SDavid S. Miller unsigned int auth_type, unsigned int digest_size, 522dc4ccfd1SDavid S. Miller unsigned int result_size, void *hash_loc, 523dc4ccfd1SDavid S. Miller unsigned long auth_key, unsigned int auth_key_len) 5240a625fd2SDavid S. Miller { 5250a625fd2SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 5260a625fd2SDavid S. Miller struct cwq_initial_entry *ent; 5270a625fd2SDavid S. Miller struct crypto_hash_walk walk; 5280a625fd2SDavid S. Miller struct spu_queue *qp; 5290a625fd2SDavid S. Miller unsigned long flags; 5300a625fd2SDavid S. Miller int err = -ENODEV; 5310a625fd2SDavid S. Miller int nbytes, cpu; 5320a625fd2SDavid S. Miller 5330a625fd2SDavid S. Miller /* The total effective length of the operation may not 5340a625fd2SDavid S. Miller * exceed 2^16. 5350a625fd2SDavid S. Miller */ 5360a625fd2SDavid S. Miller if (unlikely(req->nbytes > (1 << 16))) { 537c9aa55e5SDavid S. 
Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 53865a23d67SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 5390a625fd2SDavid S. Miller 540c9aa55e5SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 541c9aa55e5SDavid S. Miller rctx->fallback_req.base.flags = 542c9aa55e5SDavid S. Miller req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 543c9aa55e5SDavid S. Miller rctx->fallback_req.nbytes = req->nbytes; 544c9aa55e5SDavid S. Miller rctx->fallback_req.src = req->src; 545c9aa55e5SDavid S. Miller rctx->fallback_req.result = req->result; 546c9aa55e5SDavid S. Miller 547c9aa55e5SDavid S. Miller return crypto_ahash_digest(&rctx->fallback_req); 5480a625fd2SDavid S. Miller } 5490a625fd2SDavid S. Miller 5500a625fd2SDavid S. Miller nbytes = crypto_hash_walk_first(req, &walk); 5510a625fd2SDavid S. Miller 5520a625fd2SDavid S. Miller cpu = get_cpu(); 5530a625fd2SDavid S. Miller qp = cpu_to_cwq[cpu]; 5540a625fd2SDavid S. Miller if (!qp) 5550a625fd2SDavid S. Miller goto out; 5560a625fd2SDavid S. Miller 5570a625fd2SDavid S. Miller spin_lock_irqsave(&qp->lock, flags); 5580a625fd2SDavid S. Miller 5590a625fd2SDavid S. Miller /* XXX can do better, improve this later by doing a by-hand scatterlist 5600a625fd2SDavid S. Miller * XXX walk, etc. 5610a625fd2SDavid S. Miller */ 5620a625fd2SDavid S. Miller ent = qp->q + qp->tail; 5630a625fd2SDavid S. Miller 564dc4ccfd1SDavid S. Miller ent->control = control_word_base(nbytes, auth_key_len, 0, 5650a625fd2SDavid S. Miller auth_type, digest_size, 5660a625fd2SDavid S. Miller false, true, false, false, 5670a625fd2SDavid S. Miller OPCODE_INPLACE_BIT | 5680a625fd2SDavid S. Miller OPCODE_AUTH_MAC); 5690a625fd2SDavid S. Miller ent->src_addr = __pa(walk.data); 570dc4ccfd1SDavid S. Miller ent->auth_key_addr = auth_key; 5710a625fd2SDavid S. Miller ent->auth_iv_addr = __pa(hash_loc); 5720a625fd2SDavid S. Miller ent->final_auth_state_addr = 0UL; 5730a625fd2SDavid S. 
Miller ent->enc_key_addr = 0UL; 5740a625fd2SDavid S. Miller ent->enc_iv_addr = 0UL; 5750a625fd2SDavid S. Miller ent->dest_addr = __pa(hash_loc); 5760a625fd2SDavid S. Miller 5770a625fd2SDavid S. Miller nbytes = crypto_hash_walk_done(&walk, 0); 5780a625fd2SDavid S. Miller while (nbytes > 0) { 5790a625fd2SDavid S. Miller ent = spu_queue_next(qp, ent); 5800a625fd2SDavid S. Miller 5810a625fd2SDavid S. Miller ent->control = (nbytes - 1); 5820a625fd2SDavid S. Miller ent->src_addr = __pa(walk.data); 5830a625fd2SDavid S. Miller ent->auth_key_addr = 0UL; 5840a625fd2SDavid S. Miller ent->auth_iv_addr = 0UL; 5850a625fd2SDavid S. Miller ent->final_auth_state_addr = 0UL; 5860a625fd2SDavid S. Miller ent->enc_key_addr = 0UL; 5870a625fd2SDavid S. Miller ent->enc_iv_addr = 0UL; 5880a625fd2SDavid S. Miller ent->dest_addr = 0UL; 5890a625fd2SDavid S. Miller 5900a625fd2SDavid S. Miller nbytes = crypto_hash_walk_done(&walk, 0); 5910a625fd2SDavid S. Miller } 5920a625fd2SDavid S. Miller ent->control |= CONTROL_END_OF_BLOCK; 5930a625fd2SDavid S. Miller 5940a625fd2SDavid S. Miller if (submit_and_wait_for_tail(qp, ent) != HV_EOK) 5950a625fd2SDavid S. Miller err = -EINVAL; 5960a625fd2SDavid S. Miller else 5970a625fd2SDavid S. Miller err = 0; 5980a625fd2SDavid S. Miller 5990a625fd2SDavid S. Miller spin_unlock_irqrestore(&qp->lock, flags); 6000a625fd2SDavid S. Miller 6010a625fd2SDavid S. Miller if (!err) 6020a625fd2SDavid S. Miller memcpy(req->result, hash_loc, result_size); 6030a625fd2SDavid S. Miller out: 6040a625fd2SDavid S. Miller put_cpu(); 6050a625fd2SDavid S. Miller 6060a625fd2SDavid S. Miller return err; 6070a625fd2SDavid S. Miller } 6080a625fd2SDavid S. Miller 6093a2c0346SDavid S. Miller static int n2_hash_async_digest(struct ahash_request *req) 6100a625fd2SDavid S. Miller { 6113a2c0346SDavid S. Miller struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm); 612c9aa55e5SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 6133a2c0346SDavid S. 
Miller int ds; 6140a625fd2SDavid S. Miller 6153a2c0346SDavid S. Miller ds = n2alg->digest_size; 6160a625fd2SDavid S. Miller if (unlikely(req->nbytes == 0)) { 6173a2c0346SDavid S. Miller memcpy(req->result, n2alg->hash_zero, ds); 6180a625fd2SDavid S. Miller return 0; 6190a625fd2SDavid S. Miller } 6203a2c0346SDavid S. Miller memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz); 6210a625fd2SDavid S. Miller 6223a2c0346SDavid S. Miller return n2_do_async_digest(req, n2alg->auth_type, 6233a2c0346SDavid S. Miller n2alg->hw_op_hashsz, ds, 624dc4ccfd1SDavid S. Miller &rctx->u, 0UL, 0); 625dc4ccfd1SDavid S. Miller } 626dc4ccfd1SDavid S. Miller 627dc4ccfd1SDavid S. Miller static int n2_hmac_async_digest(struct ahash_request *req) 628dc4ccfd1SDavid S. Miller { 629dc4ccfd1SDavid S. Miller struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm); 630dc4ccfd1SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 631dc4ccfd1SDavid S. Miller struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 632dc4ccfd1SDavid S. Miller struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); 633dc4ccfd1SDavid S. Miller int ds; 634dc4ccfd1SDavid S. Miller 635dc4ccfd1SDavid S. Miller ds = n2alg->derived.digest_size; 636dc4ccfd1SDavid S. Miller if (unlikely(req->nbytes == 0) || 637dc4ccfd1SDavid S. Miller unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) { 638dc4ccfd1SDavid S. Miller struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 639dc4ccfd1SDavid S. Miller struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 640dc4ccfd1SDavid S. Miller 641dc4ccfd1SDavid S. Miller ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 642dc4ccfd1SDavid S. Miller rctx->fallback_req.base.flags = 643dc4ccfd1SDavid S. Miller req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 644dc4ccfd1SDavid S. Miller rctx->fallback_req.nbytes = req->nbytes; 645dc4ccfd1SDavid S. Miller rctx->fallback_req.src = req->src; 646dc4ccfd1SDavid S. Miller rctx->fallback_req.result = req->result; 647dc4ccfd1SDavid S. 
Miller 648dc4ccfd1SDavid S. Miller return crypto_ahash_digest(&rctx->fallback_req); 649dc4ccfd1SDavid S. Miller } 650dc4ccfd1SDavid S. Miller memcpy(&rctx->u, n2alg->derived.hash_init, 651dc4ccfd1SDavid S. Miller n2alg->derived.hw_op_hashsz); 652dc4ccfd1SDavid S. Miller 653dc4ccfd1SDavid S. Miller return n2_do_async_digest(req, n2alg->derived.hmac_type, 654dc4ccfd1SDavid S. Miller n2alg->derived.hw_op_hashsz, ds, 655dc4ccfd1SDavid S. Miller &rctx->u, 656dc4ccfd1SDavid S. Miller __pa(&ctx->hash_key), 657dc4ccfd1SDavid S. Miller ctx->hash_key_len); 6580a625fd2SDavid S. Miller } 6590a625fd2SDavid S. Miller 6600a625fd2SDavid S. Miller struct n2_cipher_context { 6610a625fd2SDavid S. Miller int key_len; 6620a625fd2SDavid S. Miller int enc_type; 6630a625fd2SDavid S. Miller union { 6640a625fd2SDavid S. Miller u8 aes[AES_MAX_KEY_SIZE]; 6650a625fd2SDavid S. Miller u8 des[DES_KEY_SIZE]; 6660a625fd2SDavid S. Miller u8 des3[3 * DES_KEY_SIZE]; 6670a625fd2SDavid S. Miller u8 arc4[258]; /* S-box, X, Y */ 6680a625fd2SDavid S. Miller } key; 6690a625fd2SDavid S. Miller }; 6700a625fd2SDavid S. Miller 6710a625fd2SDavid S. Miller #define N2_CHUNK_ARR_LEN 16 6720a625fd2SDavid S. Miller 6730a625fd2SDavid S. Miller struct n2_crypto_chunk { 6740a625fd2SDavid S. Miller struct list_head entry; 6750a625fd2SDavid S. Miller unsigned long iv_paddr : 44; 6760a625fd2SDavid S. Miller unsigned long arr_len : 20; 6770a625fd2SDavid S. Miller unsigned long dest_paddr; 6780a625fd2SDavid S. Miller unsigned long dest_final; 6790a625fd2SDavid S. Miller struct { 6800a625fd2SDavid S. Miller unsigned long src_paddr : 44; 6810a625fd2SDavid S. Miller unsigned long src_len : 20; 6820a625fd2SDavid S. Miller } arr[N2_CHUNK_ARR_LEN]; 6830a625fd2SDavid S. Miller }; 6840a625fd2SDavid S. Miller 6850a625fd2SDavid S. Miller struct n2_request_context { 6860a625fd2SDavid S. Miller struct ablkcipher_walk walk; 6870a625fd2SDavid S. Miller struct list_head chunk_list; 6880a625fd2SDavid S. 
Miller struct n2_crypto_chunk chunk; 6890a625fd2SDavid S. Miller u8 temp_iv[16]; 6900a625fd2SDavid S. Miller }; 6910a625fd2SDavid S. Miller 6920a625fd2SDavid S. Miller /* The SPU allows some level of flexibility for partial cipher blocks 6930a625fd2SDavid S. Miller * being specified in a descriptor. 6940a625fd2SDavid S. Miller * 6950a625fd2SDavid S. Miller * It merely requires that every descriptor's length field is at least 6960a625fd2SDavid S. Miller * as large as the cipher block size. This means that a cipher block 6970a625fd2SDavid S. Miller * can span at most 2 descriptors. However, this does not allow a 6980a625fd2SDavid S. Miller * partial block to span into the final descriptor as that would 6990a625fd2SDavid S. Miller * violate the rule (since every descriptor's length must be at lest 7000a625fd2SDavid S. Miller * the block size). So, for example, assuming an 8 byte block size: 7010a625fd2SDavid S. Miller * 7020a625fd2SDavid S. Miller * 0xe --> 0xa --> 0x8 7030a625fd2SDavid S. Miller * 7040a625fd2SDavid S. Miller * is a valid length sequence, whereas: 7050a625fd2SDavid S. Miller * 7060a625fd2SDavid S. Miller * 0xe --> 0xb --> 0x7 7070a625fd2SDavid S. Miller * 7080a625fd2SDavid S. Miller * is not a valid sequence. 7090a625fd2SDavid S. Miller */ 7100a625fd2SDavid S. Miller 7110a625fd2SDavid S. Miller struct n2_cipher_alg { 7120a625fd2SDavid S. Miller struct list_head entry; 7130a625fd2SDavid S. Miller u8 enc_type; 7140a625fd2SDavid S. Miller struct crypto_alg alg; 7150a625fd2SDavid S. Miller }; 7160a625fd2SDavid S. Miller 7170a625fd2SDavid S. Miller static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm) 7180a625fd2SDavid S. Miller { 7190a625fd2SDavid S. Miller struct crypto_alg *alg = tfm->__crt_alg; 7200a625fd2SDavid S. Miller 7210a625fd2SDavid S. Miller return container_of(alg, struct n2_cipher_alg, alg); 7220a625fd2SDavid S. Miller } 7230a625fd2SDavid S. Miller 7240a625fd2SDavid S. 
/* Per-request context for the plain cipher path: only the scatterlist
 * page-walk state.
 */
struct n2_cipher_request_context {
	struct ablkcipher_walk	walk;
};

/* Set an AES key: keep only the chaining bits from the template's
 * enc_type, then OR in the key-size-specific AES algorithm code.
 * Rejects any length other than 128/192/256 bits.
 */
static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}

/* Set a single-DES key.  des_ekey() returning 0 indicates a weak key;
 * such keys are rejected only when the user asked for that via
 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS, matching crypto-API convention.
 */
static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int err;

	ctx->enc_type = n2alg->enc_type;

	if (keylen != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	err = des_ekey(tmp, key);
	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

/* Set a triple-DES key after the standard 3DES key-equivalence check
 * (__des3_verify_key rejects K1==K2 / K2==K3 style degenerate keys).
 */
static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u32 flags;
	int err;

	flags = crypto_ablkcipher_get_flags(cipher);
	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(cipher, flags);
		return err;
	}

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}

/* Set an RC4 key by running the RC4 key-scheduling algorithm in
 * software; the hardware consumes the resulting 256-byte S-box plus the
 * x/y stream indices stored at s[256] and s[257] (both start at 0).
 */
static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u8 *s = ctx->key.arc4;
	u8 *x = s + 256;
	u8 *y = x + 1;
	int i, j, k;

	ctx->enc_type = n2alg->enc_type;

	j = k = 0;
	*x = 0;
	*y = 0;
	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0; i < 256; i++) {
		u8 a = s[i];
		j = (j + key[k] + a) & 0xff;
		s[i] = s[j];
		s[j] = a;
		if (++k >= keylen)
			k = 0;
	}

	return 0;
}

/* Length to put in one descriptor: nbytes rounded down to a whole number
 * of cipher blocks, capped at the 64KB descriptor maximum.
 */
static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}
/* Translate one n2_crypto_chunk into CWQ descriptors and submit it.
 *
 * The first descriptor carries the full control word (enc_type, key and
 * IV physical addresses, in-place flag); continuation descriptors carry
 * only a length and source address.  The final descriptor is tagged with
 * CONTROL_END_OF_BLOCK.  Caller must hold qp->lock.
 * Returns -EBUSY if the queue has no room, -EINVAL on hypervisor error.
 */
static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	/* When source and destination coincide the hardware operates
	 * in-place and dest_addr must be left zero.
	 */
	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		/* Continuation entries: length-1 in control, source only. */
		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

/* Walk the request's scatterlists and build rctx->chunk_list.
 *
 * A new chunk is started whenever the in-place property flips, the
 * output becomes discontiguous, the descriptor array fills up, or the
 * accumulated length would exceed the 64KB hardware limit.  The first
 * chunk is the one embedded in the request context; extra chunks are
 * kzalloc'd with GFP_ATOMIC (failure aborts with -ENOMEM; the caller
 * releases everything through n2_chunk_complete()).
 */
static int n2_compute_chunks(struct ablkcipher_request *req)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct ablkcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, walk);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.page) +
			     walk->src.offset);
		dest_paddr = (page_to_phys(walk->dst.page) +
			      walk->dst.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = cipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			/* Close the current chunk if this segment cannot
			 * be appended to it.
			 */
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}

/* Tear down after a cipher request: optionally copy the final IV back
 * into the walk (chaining modes), finish the walk, and free every chunk
 * still on the list except the one embedded in the request context.
 */
static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	ablkcipher_walk_complete(&rctx->walk);
	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}

}
/* Execute an ECB request: chunk the scatterlists, submit every chunk on
 * this CPU's crypto work queue under qp->lock, then wait for the queue
 * tail to drain.  No IV is involved, so chunks are independent.
 * Returns -ENODEV when the CPU has no CWQ, -EINVAL on hypervisor error.
 */
static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	/* get_cpu() pins us so the per-CPU queue stays valid; balanced by
	 * put_cpu() at "out".
	 */
	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	/* Frees any chunks left on the list after an error. */
	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, true);
}
static int n2_decrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, false);
}

/* Execute a chaining-mode (CBC/CFB/CTR) request.
 *
 * Encryption walks the chunk list forward: each chunk's IV is the last
 * ciphertext block of the previous chunk (initially the request IV), and
 * the final IV is read back from the last chunk's output.
 *
 * Decryption walks the list in reverse so that each chunk's IV — the
 * last *ciphertext* block preceding it — can still be read from the
 * source buffer before an in-place operation overwrites it; the final
 * IV is snapshotted into rctx->temp_iv up front for the same reason.
 */
static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			/* Next chunk chains off our last output block. */
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			/* In a reverse-safe walk, tmp is the chunk that
			 * precedes c; its last source block is c's IV.
			 */
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				/* First reverse iteration == last chunk:
				 * save its final ciphertext block before
				 * it can be overwritten in place.
				 */
				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}

static int n2_encrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, false);
}

/* Template from which each registered cipher algorithm is built:
 * crypto-API names, hardware enc_type bits, and the ablkcipher ops.
 */
struct n2_cipher_tmpl {
	const char		*name;
	const char		*drv_name;
	u8			block_size;
	u8			enc_type;
	struct ablkcipher_alg	ablkcipher;
};
/* Table of every cipher mode the SPU supports.  Note the AES entries
 * start from ENC_TYPE_ALG_AES128; n2_aes_setkey() replaces the algorithm
 * bits according to the actual key length.  CTR reuses the encrypt path
 * for decryption since CTR en/decryption are the same operation.
 */
static const struct n2_cipher_tmpl cipher_tmpls[] = {
	/* ARC4: only ECB is supported (chaining bits ignored) */
	{ .name		= "ecb(arc4)",
	  .drv_name	= "ecb-arc4",
	  .block_size	= 1,
	  .enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= 1,
		  .max_keysize	= 256,
		  .setkey	= n2_arc4_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},

	/* DES: ECB CBC and CFB are supported */
	{ .name		= "ecb(des)",
	  .drv_name	= "ecb-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(des)",
	  .drv_name	= "cbc-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= DES_BLOCK_SIZE,
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "cfb(des)",
	  .drv_name	= "cfb-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_CFB),
	  .ablkcipher	= {
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},

	/* 3DES: ECB CBC and CFB are supported */
	{ .name		= "ecb(des3_ede)",
	  .drv_name	= "ecb-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(des3_ede)",
	  .drv_name	= "cbc-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= DES_BLOCK_SIZE,
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "cfb(des3_ede)",
	  .drv_name	= "cfb-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_CFB),
	  .ablkcipher	= {
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	/* AES: ECB CBC and CTR are supported */
	{ .name		= "ecb(aes)",
	  .drv_name	= "ecb-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(aes)",
	  .drv_name	= "cbc-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= AES_BLOCK_SIZE,
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "ctr(aes)",
	  .drv_name	= "ctr-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_COUNTER),
	  .ablkcipher	= {
		  .ivsize	= AES_BLOCK_SIZE,
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  /* CTR mode is its own inverse: decrypt == encrypt. */
		  .decrypt	= n2_encrypt_chaining,
	  },
	},

};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)

static LIST_HEAD(cipher_algs);

/* Template for each hash/HMAC algorithm: crypto-API name, precomputed
 * empty-message digest, hardware-format initial state, hardware auth
 * type codes, and the standard size parameters.
 */
struct n2_hash_tmpl {
	const char	*name;
	const u8	*hash_zero;
	const u32	*hash_init;
	u8		hw_op_hashsz;
	u8		digest_size;
	u8		block_size;
	u8		auth_type;
	u8		hmac_type;
};

/* MD5 state is little-endian on this hardware, hence the cpu_to_le32. */
static const u32 md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(MD5_H0),
	cpu_to_le32(MD5_H1),
	cpu_to_le32(MD5_H2),
	cpu_to_le32(MD5_H3),
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
Miller static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { 13083a2c0346SDavid S. Miller SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, 13093a2c0346SDavid S. Miller SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, 13103a2c0346SDavid S. Miller }; 13113a2c0346SDavid S. Miller static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { 13123a2c0346SDavid S. Miller SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, 13133a2c0346SDavid S. Miller SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, 13143a2c0346SDavid S. Miller }; 13153a2c0346SDavid S. Miller 13160a625fd2SDavid S. Miller static const struct n2_hash_tmpl hash_tmpls[] = { 13170a625fd2SDavid S. Miller { .name = "md5", 13188054b800SLABBE Corentin .hash_zero = md5_zero_message_hash, 13193a2c0346SDavid S. Miller .hash_init = md5_init, 13203a2c0346SDavid S. Miller .auth_type = AUTH_TYPE_MD5, 1321dc4ccfd1SDavid S. Miller .hmac_type = AUTH_TYPE_HMAC_MD5, 13223a2c0346SDavid S. Miller .hw_op_hashsz = MD5_DIGEST_SIZE, 13230a625fd2SDavid S. Miller .digest_size = MD5_DIGEST_SIZE, 13240a625fd2SDavid S. Miller .block_size = MD5_HMAC_BLOCK_SIZE }, 13250a625fd2SDavid S. Miller { .name = "sha1", 13268054b800SLABBE Corentin .hash_zero = sha1_zero_message_hash, 13273a2c0346SDavid S. Miller .hash_init = sha1_init, 13283a2c0346SDavid S. Miller .auth_type = AUTH_TYPE_SHA1, 1329dc4ccfd1SDavid S. Miller .hmac_type = AUTH_TYPE_HMAC_SHA1, 13303a2c0346SDavid S. Miller .hw_op_hashsz = SHA1_DIGEST_SIZE, 13310a625fd2SDavid S. Miller .digest_size = SHA1_DIGEST_SIZE, 13320a625fd2SDavid S. Miller .block_size = SHA1_BLOCK_SIZE }, 13330a625fd2SDavid S. Miller { .name = "sha256", 13348054b800SLABBE Corentin .hash_zero = sha256_zero_message_hash, 13353a2c0346SDavid S. Miller .hash_init = sha256_init, 13363a2c0346SDavid S. Miller .auth_type = AUTH_TYPE_SHA256, 1337dc4ccfd1SDavid S. Miller .hmac_type = AUTH_TYPE_HMAC_SHA256, 13383a2c0346SDavid S. Miller .hw_op_hashsz = SHA256_DIGEST_SIZE, 13390a625fd2SDavid S. 
Miller .digest_size = SHA256_DIGEST_SIZE, 13400a625fd2SDavid S. Miller .block_size = SHA256_BLOCK_SIZE }, 13410a625fd2SDavid S. Miller { .name = "sha224", 13428054b800SLABBE Corentin .hash_zero = sha224_zero_message_hash, 13433a2c0346SDavid S. Miller .hash_init = sha224_init, 13443a2c0346SDavid S. Miller .auth_type = AUTH_TYPE_SHA256, 1345dc4ccfd1SDavid S. Miller .hmac_type = AUTH_TYPE_RESERVED, 13463a2c0346SDavid S. Miller .hw_op_hashsz = SHA256_DIGEST_SIZE, 13470a625fd2SDavid S. Miller .digest_size = SHA224_DIGEST_SIZE, 13480a625fd2SDavid S. Miller .block_size = SHA224_BLOCK_SIZE }, 13490a625fd2SDavid S. Miller }; 13500a625fd2SDavid S. Miller #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) 13510a625fd2SDavid S. Miller 13520a625fd2SDavid S. Miller static LIST_HEAD(ahash_algs); 1353dc4ccfd1SDavid S. Miller static LIST_HEAD(hmac_algs); 13540a625fd2SDavid S. Miller 13550a625fd2SDavid S. Miller static int algs_registered; 13560a625fd2SDavid S. Miller 13570a625fd2SDavid S. Miller static void __n2_unregister_algs(void) 13580a625fd2SDavid S. Miller { 13590a625fd2SDavid S. Miller struct n2_cipher_alg *cipher, *cipher_tmp; 13600a625fd2SDavid S. Miller struct n2_ahash_alg *alg, *alg_tmp; 1361dc4ccfd1SDavid S. Miller struct n2_hmac_alg *hmac, *hmac_tmp; 13620a625fd2SDavid S. Miller 13630a625fd2SDavid S. Miller list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { 13640a625fd2SDavid S. Miller crypto_unregister_alg(&cipher->alg); 13650a625fd2SDavid S. Miller list_del(&cipher->entry); 13660a625fd2SDavid S. Miller kfree(cipher); 13670a625fd2SDavid S. Miller } 1368dc4ccfd1SDavid S. Miller list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { 1369dc4ccfd1SDavid S. Miller crypto_unregister_ahash(&hmac->derived.alg); 1370dc4ccfd1SDavid S. Miller list_del(&hmac->derived.entry); 1371dc4ccfd1SDavid S. Miller kfree(hmac); 1372dc4ccfd1SDavid S. Miller } 13730a625fd2SDavid S. 
Miller list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { 13740a625fd2SDavid S. Miller crypto_unregister_ahash(&alg->alg); 13750a625fd2SDavid S. Miller list_del(&alg->entry); 13760a625fd2SDavid S. Miller kfree(alg); 13770a625fd2SDavid S. Miller } 13780a625fd2SDavid S. Miller } 13790a625fd2SDavid S. Miller 13800a625fd2SDavid S. Miller static int n2_cipher_cra_init(struct crypto_tfm *tfm) 13810a625fd2SDavid S. Miller { 13820a625fd2SDavid S. Miller tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); 13830a625fd2SDavid S. Miller return 0; 13840a625fd2SDavid S. Miller } 13850a625fd2SDavid S. Miller 138649cfe4dbSGreg Kroah-Hartman static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) 13870a625fd2SDavid S. Miller { 13880a625fd2SDavid S. Miller struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 13890a625fd2SDavid S. Miller struct crypto_alg *alg; 13900a625fd2SDavid S. Miller int err; 13910a625fd2SDavid S. Miller 13920a625fd2SDavid S. Miller if (!p) 13930a625fd2SDavid S. Miller return -ENOMEM; 13940a625fd2SDavid S. Miller 13950a625fd2SDavid S. Miller alg = &p->alg; 13960a625fd2SDavid S. Miller 13970a625fd2SDavid S. Miller snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 13980a625fd2SDavid S. Miller snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); 13990a625fd2SDavid S. Miller alg->cra_priority = N2_CRA_PRIORITY; 1400d912bb76SNikos Mavrogiannopoulos alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1401d912bb76SNikos Mavrogiannopoulos CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC; 14020a625fd2SDavid S. Miller alg->cra_blocksize = tmpl->block_size; 14030a625fd2SDavid S. Miller p->enc_type = tmpl->enc_type; 14040a625fd2SDavid S. Miller alg->cra_ctxsize = sizeof(struct n2_cipher_context); 14050a625fd2SDavid S. Miller alg->cra_type = &crypto_ablkcipher_type; 14060a625fd2SDavid S. Miller alg->cra_u.ablkcipher = tmpl->ablkcipher; 14070a625fd2SDavid S. 
Miller alg->cra_init = n2_cipher_cra_init; 14080a625fd2SDavid S. Miller alg->cra_module = THIS_MODULE; 14090a625fd2SDavid S. Miller 14100a625fd2SDavid S. Miller list_add(&p->entry, &cipher_algs); 14110a625fd2SDavid S. Miller err = crypto_register_alg(alg); 14120a625fd2SDavid S. Miller if (err) { 141338511108SDavid S. Miller pr_err("%s alg registration failed\n", alg->cra_name); 14140a625fd2SDavid S. Miller list_del(&p->entry); 14150a625fd2SDavid S. Miller kfree(p); 141638511108SDavid S. Miller } else { 141738511108SDavid S. Miller pr_info("%s alg registered\n", alg->cra_name); 14180a625fd2SDavid S. Miller } 14190a625fd2SDavid S. Miller return err; 14200a625fd2SDavid S. Miller } 14210a625fd2SDavid S. Miller 142249cfe4dbSGreg Kroah-Hartman static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) 1423dc4ccfd1SDavid S. Miller { 1424dc4ccfd1SDavid S. Miller struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 1425dc4ccfd1SDavid S. Miller struct ahash_alg *ahash; 1426dc4ccfd1SDavid S. Miller struct crypto_alg *base; 1427dc4ccfd1SDavid S. Miller int err; 1428dc4ccfd1SDavid S. Miller 1429dc4ccfd1SDavid S. Miller if (!p) 1430dc4ccfd1SDavid S. Miller return -ENOMEM; 1431dc4ccfd1SDavid S. Miller 1432dc4ccfd1SDavid S. Miller p->child_alg = n2ahash->alg.halg.base.cra_name; 1433dc4ccfd1SDavid S. Miller memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); 1434dc4ccfd1SDavid S. Miller INIT_LIST_HEAD(&p->derived.entry); 1435dc4ccfd1SDavid S. Miller 1436dc4ccfd1SDavid S. Miller ahash = &p->derived.alg; 1437dc4ccfd1SDavid S. Miller ahash->digest = n2_hmac_async_digest; 1438dc4ccfd1SDavid S. Miller ahash->setkey = n2_hmac_async_setkey; 1439dc4ccfd1SDavid S. Miller 1440dc4ccfd1SDavid S. Miller base = &ahash->halg.base; 1441dc4ccfd1SDavid S. Miller snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg); 1442dc4ccfd1SDavid S. Miller snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg); 1443dc4ccfd1SDavid S. 
Miller 1444dc4ccfd1SDavid S. Miller base->cra_ctxsize = sizeof(struct n2_hmac_ctx); 1445dc4ccfd1SDavid S. Miller base->cra_init = n2_hmac_cra_init; 1446dc4ccfd1SDavid S. Miller base->cra_exit = n2_hmac_cra_exit; 1447dc4ccfd1SDavid S. Miller 1448dc4ccfd1SDavid S. Miller list_add(&p->derived.entry, &hmac_algs); 1449dc4ccfd1SDavid S. Miller err = crypto_register_ahash(ahash); 1450dc4ccfd1SDavid S. Miller if (err) { 1451dc4ccfd1SDavid S. Miller pr_err("%s alg registration failed\n", base->cra_name); 1452dc4ccfd1SDavid S. Miller list_del(&p->derived.entry); 1453dc4ccfd1SDavid S. Miller kfree(p); 1454dc4ccfd1SDavid S. Miller } else { 1455dc4ccfd1SDavid S. Miller pr_info("%s alg registered\n", base->cra_name); 1456dc4ccfd1SDavid S. Miller } 1457dc4ccfd1SDavid S. Miller return err; 1458dc4ccfd1SDavid S. Miller } 1459dc4ccfd1SDavid S. Miller 146049cfe4dbSGreg Kroah-Hartman static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) 14610a625fd2SDavid S. Miller { 14620a625fd2SDavid S. Miller struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 14630a625fd2SDavid S. Miller struct hash_alg_common *halg; 14640a625fd2SDavid S. Miller struct crypto_alg *base; 14650a625fd2SDavid S. Miller struct ahash_alg *ahash; 14660a625fd2SDavid S. Miller int err; 14670a625fd2SDavid S. Miller 14680a625fd2SDavid S. Miller if (!p) 14690a625fd2SDavid S. Miller return -ENOMEM; 14700a625fd2SDavid S. Miller 14713a2c0346SDavid S. Miller p->hash_zero = tmpl->hash_zero; 14723a2c0346SDavid S. Miller p->hash_init = tmpl->hash_init; 14733a2c0346SDavid S. Miller p->auth_type = tmpl->auth_type; 1474dc4ccfd1SDavid S. Miller p->hmac_type = tmpl->hmac_type; 14753a2c0346SDavid S. Miller p->hw_op_hashsz = tmpl->hw_op_hashsz; 14763a2c0346SDavid S. Miller p->digest_size = tmpl->digest_size; 14773a2c0346SDavid S. Miller 14780a625fd2SDavid S. Miller ahash = &p->alg; 14790a625fd2SDavid S. Miller ahash->init = n2_hash_async_init; 14800a625fd2SDavid S. 
Miller ahash->update = n2_hash_async_update; 14810a625fd2SDavid S. Miller ahash->final = n2_hash_async_final; 14820a625fd2SDavid S. Miller ahash->finup = n2_hash_async_finup; 14833a2c0346SDavid S. Miller ahash->digest = n2_hash_async_digest; 1484378fe6fbSKamil Konieczny ahash->export = n2_hash_async_noexport; 1485378fe6fbSKamil Konieczny ahash->import = n2_hash_async_noimport; 14860a625fd2SDavid S. Miller 14870a625fd2SDavid S. Miller halg = &ahash->halg; 14880a625fd2SDavid S. Miller halg->digestsize = tmpl->digest_size; 14890a625fd2SDavid S. Miller 14900a625fd2SDavid S. Miller base = &halg->base; 14910a625fd2SDavid S. Miller snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 14920a625fd2SDavid S. Miller snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); 14930a625fd2SDavid S. Miller base->cra_priority = N2_CRA_PRIORITY; 14946a38f622SEric Biggers base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | 1495d912bb76SNikos Mavrogiannopoulos CRYPTO_ALG_NEED_FALLBACK; 14960a625fd2SDavid S. Miller base->cra_blocksize = tmpl->block_size; 14970a625fd2SDavid S. Miller base->cra_ctxsize = sizeof(struct n2_hash_ctx); 14980a625fd2SDavid S. Miller base->cra_module = THIS_MODULE; 14990a625fd2SDavid S. Miller base->cra_init = n2_hash_cra_init; 15000a625fd2SDavid S. Miller base->cra_exit = n2_hash_cra_exit; 15010a625fd2SDavid S. Miller 15020a625fd2SDavid S. Miller list_add(&p->entry, &ahash_algs); 15030a625fd2SDavid S. Miller err = crypto_register_ahash(ahash); 15040a625fd2SDavid S. Miller if (err) { 150538511108SDavid S. Miller pr_err("%s alg registration failed\n", base->cra_name); 15060a625fd2SDavid S. Miller list_del(&p->entry); 15070a625fd2SDavid S. Miller kfree(p); 150838511108SDavid S. Miller } else { 150938511108SDavid S. Miller pr_info("%s alg registered\n", base->cra_name); 15100a625fd2SDavid S. Miller } 1511dc4ccfd1SDavid S. Miller if (!err && p->hmac_type != AUTH_TYPE_RESERVED) 1512dc4ccfd1SDavid S. 
Miller err = __n2_register_one_hmac(p); 15130a625fd2SDavid S. Miller return err; 15140a625fd2SDavid S. Miller } 15150a625fd2SDavid S. Miller 151649cfe4dbSGreg Kroah-Hartman static int n2_register_algs(void) 15170a625fd2SDavid S. Miller { 15180a625fd2SDavid S. Miller int i, err = 0; 15190a625fd2SDavid S. Miller 15200a625fd2SDavid S. Miller mutex_lock(&spu_lock); 15210a625fd2SDavid S. Miller if (algs_registered++) 15220a625fd2SDavid S. Miller goto out; 15230a625fd2SDavid S. Miller 15240a625fd2SDavid S. Miller for (i = 0; i < NUM_HASH_TMPLS; i++) { 15250a625fd2SDavid S. Miller err = __n2_register_one_ahash(&hash_tmpls[i]); 15260a625fd2SDavid S. Miller if (err) { 15270a625fd2SDavid S. Miller __n2_unregister_algs(); 15280a625fd2SDavid S. Miller goto out; 15290a625fd2SDavid S. Miller } 15300a625fd2SDavid S. Miller } 15310a625fd2SDavid S. Miller for (i = 0; i < NUM_CIPHER_TMPLS; i++) { 15320a625fd2SDavid S. Miller err = __n2_register_one_cipher(&cipher_tmpls[i]); 15330a625fd2SDavid S. Miller if (err) { 15340a625fd2SDavid S. Miller __n2_unregister_algs(); 15350a625fd2SDavid S. Miller goto out; 15360a625fd2SDavid S. Miller } 15370a625fd2SDavid S. Miller } 15380a625fd2SDavid S. Miller 15390a625fd2SDavid S. Miller out: 15400a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 15410a625fd2SDavid S. Miller return err; 15420a625fd2SDavid S. Miller } 15430a625fd2SDavid S. Miller 154449cfe4dbSGreg Kroah-Hartman static void n2_unregister_algs(void) 15450a625fd2SDavid S. Miller { 15460a625fd2SDavid S. Miller mutex_lock(&spu_lock); 15470a625fd2SDavid S. Miller if (!--algs_registered) 15480a625fd2SDavid S. Miller __n2_unregister_algs(); 15490a625fd2SDavid S. Miller mutex_unlock(&spu_lock); 15500a625fd2SDavid S. Miller } 15510a625fd2SDavid S. Miller 15520a625fd2SDavid S. Miller /* To map CWQ queues to interrupt sources, the hypervisor API provides 15530a625fd2SDavid S. Miller * a devino. 
This isn't very useful to us because all of the 15542dc11581SGrant Likely * interrupts listed in the device_node have been translated to 15550a625fd2SDavid S. Miller * Linux virtual IRQ cookie numbers. 15560a625fd2SDavid S. Miller * 15570a625fd2SDavid S. Miller * So we have to back-translate, going through the 'intr' and 'ino' 15580a625fd2SDavid S. Miller * property tables of the n2cp MDESC node, matching it with the OF 15590a625fd2SDavid S. Miller * 'interrupts' property entries, in order to to figure out which 15600a625fd2SDavid S. Miller * devino goes to which already-translated IRQ. 15610a625fd2SDavid S. Miller */ 15622dc11581SGrant Likely static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip, 15630a625fd2SDavid S. Miller unsigned long dev_ino) 15640a625fd2SDavid S. Miller { 15650a625fd2SDavid S. Miller const unsigned int *dev_intrs; 15660a625fd2SDavid S. Miller unsigned int intr; 15670a625fd2SDavid S. Miller int i; 15680a625fd2SDavid S. Miller 15690a625fd2SDavid S. Miller for (i = 0; i < ip->num_intrs; i++) { 15700a625fd2SDavid S. Miller if (ip->ino_table[i].ino == dev_ino) 15710a625fd2SDavid S. Miller break; 15720a625fd2SDavid S. Miller } 15730a625fd2SDavid S. Miller if (i == ip->num_intrs) 15740a625fd2SDavid S. Miller return -ENODEV; 15750a625fd2SDavid S. Miller 15760a625fd2SDavid S. Miller intr = ip->ino_table[i].intr; 15770a625fd2SDavid S. Miller 1578ff6c7341SDavid S. Miller dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); 15790a625fd2SDavid S. Miller if (!dev_intrs) 15800a625fd2SDavid S. Miller return -ENODEV; 15810a625fd2SDavid S. Miller 158219e4875fSGrant Likely for (i = 0; i < dev->archdata.num_irqs; i++) { 15830a625fd2SDavid S. Miller if (dev_intrs[i] == intr) 15840a625fd2SDavid S. Miller return i; 15850a625fd2SDavid S. Miller } 15860a625fd2SDavid S. Miller 15870a625fd2SDavid S. Miller return -ENODEV; 15880a625fd2SDavid S. Miller } 15890a625fd2SDavid S. 
Miller 15902dc11581SGrant Likely static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip, 15910a625fd2SDavid S. Miller const char *irq_name, struct spu_queue *p, 15920a625fd2SDavid S. Miller irq_handler_t handler) 15930a625fd2SDavid S. Miller { 15940a625fd2SDavid S. Miller unsigned long herr; 15950a625fd2SDavid S. Miller int index; 15960a625fd2SDavid S. Miller 15970a625fd2SDavid S. Miller herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); 15980a625fd2SDavid S. Miller if (herr) 15990a625fd2SDavid S. Miller return -EINVAL; 16000a625fd2SDavid S. Miller 16010a625fd2SDavid S. Miller index = find_devino_index(dev, ip, p->devino); 16020a625fd2SDavid S. Miller if (index < 0) 16030a625fd2SDavid S. Miller return index; 16040a625fd2SDavid S. Miller 160519e4875fSGrant Likely p->irq = dev->archdata.irqs[index]; 16060a625fd2SDavid S. Miller 16070a625fd2SDavid S. Miller sprintf(p->irq_name, "%s-%d", irq_name, index); 16080a625fd2SDavid S. Miller 16099751bfd1STheodore Ts'o return request_irq(p->irq, handler, 0, p->irq_name, p); 16100a625fd2SDavid S. Miller } 16110a625fd2SDavid S. Miller 16120a625fd2SDavid S. Miller static struct kmem_cache *queue_cache[2]; 16130a625fd2SDavid S. Miller 16140a625fd2SDavid S. Miller static void *new_queue(unsigned long q_type) 16150a625fd2SDavid S. Miller { 16160a625fd2SDavid S. Miller return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); 16170a625fd2SDavid S. Miller } 16180a625fd2SDavid S. Miller 16190a625fd2SDavid S. Miller static void free_queue(void *p, unsigned long q_type) 16200a625fd2SDavid S. Miller { 1621150f6d45SAmitoj Kaur Chawla kmem_cache_free(queue_cache[q_type - 1], p); 16220a625fd2SDavid S. Miller } 16230a625fd2SDavid S. Miller 16240a625fd2SDavid S. Miller static int queue_cache_init(void) 16250a625fd2SDavid S. Miller { 16260a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 16270a625fd2SDavid S. Miller queue_cache[HV_NCS_QTYPE_MAU - 1] = 1628527b9525SDavid S. 
Miller kmem_cache_create("mau_queue", 16290a625fd2SDavid S. Miller (MAU_NUM_ENTRIES * 16300a625fd2SDavid S. Miller MAU_ENTRY_SIZE), 16310a625fd2SDavid S. Miller MAU_ENTRY_SIZE, 0, NULL); 16320a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 16330a625fd2SDavid S. Miller return -ENOMEM; 16340a625fd2SDavid S. Miller 16350a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) 16360a625fd2SDavid S. Miller queue_cache[HV_NCS_QTYPE_CWQ - 1] = 16370a625fd2SDavid S. Miller kmem_cache_create("cwq_queue", 16380a625fd2SDavid S. Miller (CWQ_NUM_ENTRIES * 16390a625fd2SDavid S. Miller CWQ_ENTRY_SIZE), 16400a625fd2SDavid S. Miller CWQ_ENTRY_SIZE, 0, NULL); 16410a625fd2SDavid S. Miller if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { 16420a625fd2SDavid S. Miller kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 1643203f4500SJan Engelhardt queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; 16440a625fd2SDavid S. Miller return -ENOMEM; 16450a625fd2SDavid S. Miller } 16460a625fd2SDavid S. Miller return 0; 16470a625fd2SDavid S. Miller } 16480a625fd2SDavid S. Miller 16490a625fd2SDavid S. Miller static void queue_cache_destroy(void) 16500a625fd2SDavid S. Miller { 16510a625fd2SDavid S. Miller kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 16520a625fd2SDavid S. Miller kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); 1653203f4500SJan Engelhardt queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; 1654203f4500SJan Engelhardt queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL; 16550a625fd2SDavid S. Miller } 16560a625fd2SDavid S. Miller 165773810a06SThomas Gleixner static long spu_queue_register_workfn(void *arg) 16580a625fd2SDavid S. Miller { 165973810a06SThomas Gleixner struct spu_qreg *qr = arg; 166073810a06SThomas Gleixner struct spu_queue *p = qr->queue; 166173810a06SThomas Gleixner unsigned long q_type = qr->type; 16620a625fd2SDavid S. Miller unsigned long hv_ret; 16630a625fd2SDavid S. Miller 16640a625fd2SDavid S. 
Miller hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), 16650a625fd2SDavid S. Miller CWQ_NUM_ENTRIES, &p->qhandle); 16660a625fd2SDavid S. Miller if (!hv_ret) 16670a625fd2SDavid S. Miller sun4v_ncs_sethead_marker(p->qhandle, 0); 16680a625fd2SDavid S. Miller 166973810a06SThomas Gleixner return hv_ret ? -EINVAL : 0; 167073810a06SThomas Gleixner } 16710a625fd2SDavid S. Miller 167273810a06SThomas Gleixner static int spu_queue_register(struct spu_queue *p, unsigned long q_type) 167373810a06SThomas Gleixner { 167473810a06SThomas Gleixner int cpu = cpumask_any_and(&p->sharing, cpu_online_mask); 167573810a06SThomas Gleixner struct spu_qreg qr = { .queue = p, .type = q_type }; 16760a625fd2SDavid S. Miller 167773810a06SThomas Gleixner return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr); 16780a625fd2SDavid S. Miller } 16790a625fd2SDavid S. Miller 16800a625fd2SDavid S. Miller static int spu_queue_setup(struct spu_queue *p) 16810a625fd2SDavid S. Miller { 16820a625fd2SDavid S. Miller int err; 16830a625fd2SDavid S. Miller 16840a625fd2SDavid S. Miller p->q = new_queue(p->q_type); 16850a625fd2SDavid S. Miller if (!p->q) 16860a625fd2SDavid S. Miller return -ENOMEM; 16870a625fd2SDavid S. Miller 16880a625fd2SDavid S. Miller err = spu_queue_register(p, p->q_type); 16890a625fd2SDavid S. Miller if (err) { 16900a625fd2SDavid S. Miller free_queue(p->q, p->q_type); 16910a625fd2SDavid S. Miller p->q = NULL; 16920a625fd2SDavid S. Miller } 16930a625fd2SDavid S. Miller 16940a625fd2SDavid S. Miller return err; 16950a625fd2SDavid S. Miller } 16960a625fd2SDavid S. Miller 16970a625fd2SDavid S. Miller static void spu_queue_destroy(struct spu_queue *p) 16980a625fd2SDavid S. Miller { 16990a625fd2SDavid S. Miller unsigned long hv_ret; 17000a625fd2SDavid S. Miller 17010a625fd2SDavid S. Miller if (!p->q) 17020a625fd2SDavid S. Miller return; 17030a625fd2SDavid S. Miller 17040a625fd2SDavid S. Miller hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); 17050a625fd2SDavid S. 
Miller 17060a625fd2SDavid S. Miller if (!hv_ret) 17070a625fd2SDavid S. Miller free_queue(p->q, p->q_type); 17080a625fd2SDavid S. Miller } 17090a625fd2SDavid S. Miller 17100a625fd2SDavid S. Miller static void spu_list_destroy(struct list_head *list) 17110a625fd2SDavid S. Miller { 17120a625fd2SDavid S. Miller struct spu_queue *p, *n; 17130a625fd2SDavid S. Miller 17140a625fd2SDavid S. Miller list_for_each_entry_safe(p, n, list, list) { 17150a625fd2SDavid S. Miller int i; 17160a625fd2SDavid S. Miller 17170a625fd2SDavid S. Miller for (i = 0; i < NR_CPUS; i++) { 17180a625fd2SDavid S. Miller if (cpu_to_cwq[i] == p) 17190a625fd2SDavid S. Miller cpu_to_cwq[i] = NULL; 17200a625fd2SDavid S. Miller } 17210a625fd2SDavid S. Miller 17220a625fd2SDavid S. Miller if (p->irq) { 17230a625fd2SDavid S. Miller free_irq(p->irq, p); 17240a625fd2SDavid S. Miller p->irq = 0; 17250a625fd2SDavid S. Miller } 17260a625fd2SDavid S. Miller spu_queue_destroy(p); 17270a625fd2SDavid S. Miller list_del(&p->list); 17280a625fd2SDavid S. Miller kfree(p); 17290a625fd2SDavid S. Miller } 17300a625fd2SDavid S. Miller } 17310a625fd2SDavid S. Miller 17320a625fd2SDavid S. Miller /* Walk the backward arcs of a CWQ 'exec-unit' node, 17330a625fd2SDavid S. Miller * gathering cpu membership information. 17340a625fd2SDavid S. Miller */ 17350a625fd2SDavid S. Miller static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, 17362dc11581SGrant Likely struct platform_device *dev, 17370a625fd2SDavid S. Miller u64 node, struct spu_queue *p, 17380a625fd2SDavid S. Miller struct spu_queue **table) 17390a625fd2SDavid S. Miller { 17400a625fd2SDavid S. Miller u64 arc; 17410a625fd2SDavid S. Miller 17420a625fd2SDavid S. Miller mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { 17430a625fd2SDavid S. Miller u64 tgt = mdesc_arc_target(mdesc, arc); 17440a625fd2SDavid S. Miller const char *name = mdesc_node_name(mdesc, tgt); 17450a625fd2SDavid S. Miller const u64 *id; 17460a625fd2SDavid S. Miller 17470a625fd2SDavid S. 
Miller if (strcmp(name, "cpu")) 17480a625fd2SDavid S. Miller continue; 17490a625fd2SDavid S. Miller id = mdesc_get_property(mdesc, tgt, "id", NULL); 17500a625fd2SDavid S. Miller if (table[*id] != NULL) { 1751b2e870f2SRob Herring dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n", 1752b2e870f2SRob Herring dev->dev.of_node); 17530a625fd2SDavid S. Miller return -EINVAL; 17540a625fd2SDavid S. Miller } 1755f9b531feSRusty Russell cpumask_set_cpu(*id, &p->sharing); 17560a625fd2SDavid S. Miller table[*id] = p; 17570a625fd2SDavid S. Miller } 17580a625fd2SDavid S. Miller return 0; 17590a625fd2SDavid S. Miller } 17600a625fd2SDavid S. Miller 17610a625fd2SDavid S. Miller /* Process an 'exec-unit' MDESC node of type 'cwq'. */ 17620a625fd2SDavid S. Miller static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, 17632dc11581SGrant Likely struct platform_device *dev, struct mdesc_handle *mdesc, 17640a625fd2SDavid S. Miller u64 node, const char *iname, unsigned long q_type, 17650a625fd2SDavid S. Miller irq_handler_t handler, struct spu_queue **table) 17660a625fd2SDavid S. Miller { 17670a625fd2SDavid S. Miller struct spu_queue *p; 17680a625fd2SDavid S. Miller int err; 17690a625fd2SDavid S. Miller 17700a625fd2SDavid S. Miller p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); 17710a625fd2SDavid S. Miller if (!p) { 1772b2e870f2SRob Herring dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n", 1773b2e870f2SRob Herring dev->dev.of_node); 17740a625fd2SDavid S. Miller return -ENOMEM; 17750a625fd2SDavid S. Miller } 17760a625fd2SDavid S. Miller 1777f9b531feSRusty Russell cpumask_clear(&p->sharing); 17780a625fd2SDavid S. Miller spin_lock_init(&p->lock); 17790a625fd2SDavid S. Miller p->q_type = q_type; 17800a625fd2SDavid S. Miller INIT_LIST_HEAD(&p->jobs); 17810a625fd2SDavid S. Miller list_add(&p->list, list); 17820a625fd2SDavid S. Miller 17830a625fd2SDavid S. Miller err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); 17840a625fd2SDavid S. 
Miller if (err) 17850a625fd2SDavid S. Miller return err; 17860a625fd2SDavid S. Miller 17870a625fd2SDavid S. Miller err = spu_queue_setup(p); 17880a625fd2SDavid S. Miller if (err) 17890a625fd2SDavid S. Miller return err; 17900a625fd2SDavid S. Miller 17910a625fd2SDavid S. Miller return spu_map_ino(dev, ip, iname, p, handler); 17920a625fd2SDavid S. Miller } 17930a625fd2SDavid S. Miller 17942dc11581SGrant Likely static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev, 17950a625fd2SDavid S. Miller struct spu_mdesc_info *ip, struct list_head *list, 17960a625fd2SDavid S. Miller const char *exec_name, unsigned long q_type, 17970a625fd2SDavid S. Miller irq_handler_t handler, struct spu_queue **table) 17980a625fd2SDavid S. Miller { 17990a625fd2SDavid S. Miller int err = 0; 18000a625fd2SDavid S. Miller u64 node; 18010a625fd2SDavid S. Miller 18020a625fd2SDavid S. Miller mdesc_for_each_node_by_name(mdesc, node, "exec-unit") { 18030a625fd2SDavid S. Miller const char *type; 18040a625fd2SDavid S. Miller 18050a625fd2SDavid S. Miller type = mdesc_get_property(mdesc, node, "type", NULL); 18060a625fd2SDavid S. Miller if (!type || strcmp(type, exec_name)) 18070a625fd2SDavid S. Miller continue; 18080a625fd2SDavid S. Miller 18090a625fd2SDavid S. Miller err = handle_exec_unit(ip, list, dev, mdesc, node, 18100a625fd2SDavid S. Miller exec_name, q_type, handler, table); 18110a625fd2SDavid S. Miller if (err) { 18120a625fd2SDavid S. Miller spu_list_destroy(list); 18130a625fd2SDavid S. Miller break; 18140a625fd2SDavid S. Miller } 18150a625fd2SDavid S. Miller } 18160a625fd2SDavid S. Miller 18170a625fd2SDavid S. Miller return err; 18180a625fd2SDavid S. Miller } 18190a625fd2SDavid S. Miller 182049cfe4dbSGreg Kroah-Hartman static int get_irq_props(struct mdesc_handle *mdesc, u64 node, 18210a625fd2SDavid S. Miller struct spu_mdesc_info *ip) 18220a625fd2SDavid S. Miller { 1823eb7caf35SDavid S. Miller const u64 *ino; 1824eb7caf35SDavid S. 
Miller int ino_len; 18250a625fd2SDavid S. Miller int i; 18260a625fd2SDavid S. Miller 18270a625fd2SDavid S. Miller ino = mdesc_get_property(mdesc, node, "ino", &ino_len); 1828eb7caf35SDavid S. Miller if (!ino) { 1829eb7caf35SDavid S. Miller printk("NO 'ino'\n"); 18300a625fd2SDavid S. Miller return -ENODEV; 1831eb7caf35SDavid S. Miller } 18320a625fd2SDavid S. Miller 1833eb7caf35SDavid S. Miller ip->num_intrs = ino_len / sizeof(u64); 18340a625fd2SDavid S. Miller ip->ino_table = kzalloc((sizeof(struct ino_blob) * 18350a625fd2SDavid S. Miller ip->num_intrs), 18360a625fd2SDavid S. Miller GFP_KERNEL); 18370a625fd2SDavid S. Miller if (!ip->ino_table) 18380a625fd2SDavid S. Miller return -ENOMEM; 18390a625fd2SDavid S. Miller 18400a625fd2SDavid S. Miller for (i = 0; i < ip->num_intrs; i++) { 18410a625fd2SDavid S. Miller struct ino_blob *b = &ip->ino_table[i]; 1842eb7caf35SDavid S. Miller b->intr = i + 1; 18430a625fd2SDavid S. Miller b->ino = ino[i]; 18440a625fd2SDavid S. Miller } 18450a625fd2SDavid S. Miller 18460a625fd2SDavid S. Miller return 0; 18470a625fd2SDavid S. Miller } 18480a625fd2SDavid S. Miller 184949cfe4dbSGreg Kroah-Hartman static int grab_mdesc_irq_props(struct mdesc_handle *mdesc, 18502dc11581SGrant Likely struct platform_device *dev, 18510a625fd2SDavid S. Miller struct spu_mdesc_info *ip, 18520a625fd2SDavid S. Miller const char *node_name) 18530a625fd2SDavid S. Miller { 18540a625fd2SDavid S. Miller const unsigned int *reg; 18550a625fd2SDavid S. Miller u64 node; 18560a625fd2SDavid S. Miller 1857ff6c7341SDavid S. Miller reg = of_get_property(dev->dev.of_node, "reg", NULL); 18580a625fd2SDavid S. Miller if (!reg) 18590a625fd2SDavid S. Miller return -ENODEV; 18600a625fd2SDavid S. Miller 18610a625fd2SDavid S. Miller mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { 18620a625fd2SDavid S. Miller const char *name; 18630a625fd2SDavid S. Miller const u64 *chdl; 18640a625fd2SDavid S. Miller 18650a625fd2SDavid S. 
Miller name = mdesc_get_property(mdesc, node, "name", NULL); 18660a625fd2SDavid S. Miller if (!name || strcmp(name, node_name)) 18670a625fd2SDavid S. Miller continue; 18680a625fd2SDavid S. Miller chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); 18690a625fd2SDavid S. Miller if (!chdl || (*chdl != *reg)) 18700a625fd2SDavid S. Miller continue; 18710a625fd2SDavid S. Miller ip->cfg_handle = *chdl; 18720a625fd2SDavid S. Miller return get_irq_props(mdesc, node, ip); 18730a625fd2SDavid S. Miller } 18740a625fd2SDavid S. Miller 18750a625fd2SDavid S. Miller return -ENODEV; 18760a625fd2SDavid S. Miller } 18770a625fd2SDavid S. Miller 18780a625fd2SDavid S. Miller static unsigned long n2_spu_hvapi_major; 18790a625fd2SDavid S. Miller static unsigned long n2_spu_hvapi_minor; 18800a625fd2SDavid S. Miller 188149cfe4dbSGreg Kroah-Hartman static int n2_spu_hvapi_register(void) 18820a625fd2SDavid S. Miller { 18830a625fd2SDavid S. Miller int err; 18840a625fd2SDavid S. Miller 18850a625fd2SDavid S. Miller n2_spu_hvapi_major = 2; 18860a625fd2SDavid S. Miller n2_spu_hvapi_minor = 0; 18870a625fd2SDavid S. Miller 18880a625fd2SDavid S. Miller err = sun4v_hvapi_register(HV_GRP_NCS, 18890a625fd2SDavid S. Miller n2_spu_hvapi_major, 18900a625fd2SDavid S. Miller &n2_spu_hvapi_minor); 18910a625fd2SDavid S. Miller 18920a625fd2SDavid S. Miller if (!err) 18930a625fd2SDavid S. Miller pr_info("Registered NCS HVAPI version %lu.%lu\n", 18940a625fd2SDavid S. Miller n2_spu_hvapi_major, 18950a625fd2SDavid S. Miller n2_spu_hvapi_minor); 18960a625fd2SDavid S. Miller 18970a625fd2SDavid S. Miller return err; 18980a625fd2SDavid S. Miller } 18990a625fd2SDavid S. Miller 19000a625fd2SDavid S. Miller static void n2_spu_hvapi_unregister(void) 19010a625fd2SDavid S. Miller { 19020a625fd2SDavid S. Miller sun4v_hvapi_unregister(HV_GRP_NCS); 19030a625fd2SDavid S. Miller } 19040a625fd2SDavid S. Miller 19050a625fd2SDavid S. Miller static int global_ref; 19060a625fd2SDavid S. 
/* Acquire the resources shared by all SPU devices: the NCS hypervisor
 * API registration, the queue-entry cache, and the per-cpu CWQ/MAU
 * queue-pointer tables.
 *
 * Reference-counted via global_ref under spu_lock: only the first
 * caller performs the setup, later callers just bump the count.
 * Returns 0 on success, negative errno on failure (the reference
 * taken at entry is dropped again on any error).
 *
 * Note the layout: the error-unwind labels live *after* the common
 * "out" exit and jump backwards to it, so success and failure share
 * the global_ref/mutex bookkeeping at "out".
 */
static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	/* Already initialized by an earlier probe: just hold a ref. */
	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	/* On failure, undo the reference taken above. */
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

/* Drop one reference on the shared global resources; the last caller
 * tears everything down in the reverse order of grab_global_resources().
 */
static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}

/* Allocate and minimally initialize the per-device n2cp state.
 * Returns NULL on allocation failure.
 */
static struct n2_crypto *alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

/* Free the n2cp state, including the INO table allocated by the
 * MDESC IRQ-property scan.
 */
static void free_n2cp(struct n2_crypto *np)
{
	kfree(np->cwq_info.ino_table);
	np->cwq_info.ino_table = NULL;

	kfree(np);
}

/* Print the driver version banner exactly once, no matter how many
 * n2cp/ncp devices probe.
 */
static void n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

/* Probe one n2cp (control-word queue / crypto) platform device:
 * take the shared global resources, pull IRQ properties and scan the
 * CWQ units out of the machine description, then register the crypto
 * algorithms with the crypto API.
 *
 * Returns 0 on success, negative errno on failure; all partially
 * acquired state is unwound on the error paths below.
 */
static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	pr_info("Found N2CP at %pOF\n", dev->dev.of_node);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_n2cp;
	}

	/* The MDESC handle is held only for the duration of the scan
	 * and released on every path below.
	 */
	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
			dev->dev.of_node);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

/* Tear down one n2cp device: unregister algorithms, destroy its CWQ
 * list, drop the shared global resources and free the device state.
 */
static int n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}

/* Allocate and minimally initialize the per-device ncp (MAU) state.
 * Returns NULL on allocation failure.
 */
static struct n2_mau *alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

/* Free the ncp state, including the INO table allocated by the
 * MDESC IRQ-property scan.
 */
static void free_ncp(struct n2_mau *mp)
{
	kfree(mp->mau_info.ino_table);
	mp->mau_info.ino_table = NULL;

	kfree(mp);
}

/* Probe one ncp (modular-arithmetic unit) platform device.  Mirrors
 * n2_crypto_probe() but scans "mau" nodes and registers no crypto API
 * algorithms (the MAU is used for asymmetric math offload).
 *
 * Returns 0 on success, negative errno on failure.
 */
static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	pr_info("Found NCP at %pOF\n", dev->dev.of_node);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_ncp;
	}

	/* Held only for the duration of the scan, as in n2_crypto_probe(). */
	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

/* Tear down one ncp device: destroy its MAU list, drop the shared
 * global resources and free the device state.
 */
static int n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}

/* OF match table for the CWQ device across the Niagara-2 (n2),
 * Victoria Falls (vf) and Rainbow Falls / KT (kt) chip generations.
 */
static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name		=	"n2cp",
		.of_match_table	=	n2_crypto_match,
	},
	.probe		=	n2_crypto_probe,
	.remove		=	n2_crypto_remove,
};
/* OF match table for the MAU device across the n2/vf/kt chip
 * generations, mirroring n2_crypto_match above.
 */
static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name		=	"ncp",
		.of_match_table	=	n2_mau_match,
	},
	.probe		=	n2_mau_probe,
	.remove		=	n2_mau_remove,
};

/* Both sub-drivers are registered/unregistered as one batch. */
static struct platform_driver * const drivers[] = {
	&n2_crypto_driver,
	&n2_mau_driver,
};

/* Module entry point: register the n2cp and ncp platform drivers. */
static int __init n2_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

/* Module exit point: unregister both platform drivers. */
static void __exit n2_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);