1324429d7SHariprasad Shenai /** 2324429d7SHariprasad Shenai * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. 3324429d7SHariprasad Shenai * 4324429d7SHariprasad Shenai * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved. 5324429d7SHariprasad Shenai * 6324429d7SHariprasad Shenai * This program is free software; you can redistribute it and/or modify 7324429d7SHariprasad Shenai * it under the terms of the GNU General Public License as published by 8324429d7SHariprasad Shenai * the Free Software Foundation. 9324429d7SHariprasad Shenai * 10324429d7SHariprasad Shenai * Written and Maintained by: 11324429d7SHariprasad Shenai * Manoj Malviya (manojmalviya@chelsio.com) 12324429d7SHariprasad Shenai * Atul Gupta (atul.gupta@chelsio.com) 13324429d7SHariprasad Shenai * Jitendra Lulla (jlulla@chelsio.com) 14324429d7SHariprasad Shenai * Yeshaswi M R Gowda (yeshaswi@chelsio.com) 15324429d7SHariprasad Shenai * Harsh Jain (harsh@chelsio.com) 16324429d7SHariprasad Shenai */ 17324429d7SHariprasad Shenai 18324429d7SHariprasad Shenai #include <linux/kernel.h> 19324429d7SHariprasad Shenai #include <linux/module.h> 20324429d7SHariprasad Shenai #include <linux/skbuff.h> 21324429d7SHariprasad Shenai 22324429d7SHariprasad Shenai #include <crypto/aes.h> 23324429d7SHariprasad Shenai #include <crypto/hash.h> 24324429d7SHariprasad Shenai 25324429d7SHariprasad Shenai #include "t4_msg.h" 26324429d7SHariprasad Shenai #include "chcr_core.h" 27324429d7SHariprasad Shenai #include "cxgb4_uld.h" 28324429d7SHariprasad Shenai 29fef4912bSHarsh Jain static struct chcr_driver_data drv_data; 30324429d7SHariprasad Shenai 31324429d7SHariprasad Shenai typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input); 32324429d7SHariprasad Shenai static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input); 33324429d7SHariprasad Shenai static void *chcr_uld_add(const struct cxgb4_lld_info *lld); 34324429d7SHariprasad Shenai static int 
chcr_uld_state_change(void *handle, enum cxgb4_state state); 35324429d7SHariprasad Shenai 36324429d7SHariprasad Shenai static chcr_handler_func work_handlers[NUM_CPL_CMDS] = { 37324429d7SHariprasad Shenai [CPL_FW6_PLD] = cpl_fw6_pld_handler, 38324429d7SHariprasad Shenai }; 39324429d7SHariprasad Shenai 400fbc81b3SHariprasad Shenai static struct cxgb4_uld_info chcr_uld_info = { 41324429d7SHariprasad Shenai .name = DRV_MODULE_NAME, 420fbc81b3SHariprasad Shenai .nrxq = MAX_ULD_QSETS, 43a1c6fd43SHarsh Jain /* Max ntxq will be derived from fw config file*/ 44324429d7SHariprasad Shenai .rxq_size = 1024, 45324429d7SHariprasad Shenai .add = chcr_uld_add, 46324429d7SHariprasad Shenai .state_change = chcr_uld_state_change, 47324429d7SHariprasad Shenai .rx_handler = chcr_uld_rx_handler, 486dad4e8aSAtul Gupta #ifdef CONFIG_CHELSIO_IPSEC_INLINE 496dad4e8aSAtul Gupta .tx_handler = chcr_uld_tx_handler, 506dad4e8aSAtul Gupta #endif /* CONFIG_CHELSIO_IPSEC_INLINE */ 51324429d7SHariprasad Shenai }; 52324429d7SHariprasad Shenai 53fef4912bSHarsh Jain static void detach_work_fn(struct work_struct *work) 54fef4912bSHarsh Jain { 55fef4912bSHarsh Jain struct chcr_dev *dev; 56fef4912bSHarsh Jain 57fef4912bSHarsh Jain dev = container_of(work, struct chcr_dev, detach_work.work); 58fef4912bSHarsh Jain 59fef4912bSHarsh Jain if (atomic_read(&dev->inflight)) { 60fef4912bSHarsh Jain dev->wqretry--; 61fef4912bSHarsh Jain if (dev->wqretry) { 62fef4912bSHarsh Jain pr_debug("Request Inflight Count %d\n", 63fef4912bSHarsh Jain atomic_read(&dev->inflight)); 64fef4912bSHarsh Jain 65fef4912bSHarsh Jain schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM); 66fef4912bSHarsh Jain } else { 67fef4912bSHarsh Jain WARN(1, "CHCR:%d request Still Pending\n", 68fef4912bSHarsh Jain atomic_read(&dev->inflight)); 69fef4912bSHarsh Jain complete(&dev->detach_comp); 70fef4912bSHarsh Jain } 71fef4912bSHarsh Jain } else { 72fef4912bSHarsh Jain complete(&dev->detach_comp); 73fef4912bSHarsh Jain } 74fef4912bSHarsh Jain } 

/*
 * assign_chcr_device - pick the next active crypto device, round-robin.
 *
 * Returns the uld_ctx at drv_data.last_dev and advances the cursor
 * (wrapping to the head of the active list), or NULL when no device is
 * active.  Callers must keep one session pinned to the device returned.
 */
struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in system select
	 * device in round-robin fashion for crypto operations
	 * Although One session must use the same device to
	 * maintain request-response ordering.
	 */
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		/* Wrap the cursor when it sits on the tail of the list. */
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}

/*
 * chcr_dev_add - mark a device ATTACHed and move it onto the active list.
 * Resets the in-flight counter and seeds the round-robin cursor if this
 * is the first active device.
 */
static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

/*
 * chcr_dev_init - one-time setup for a newly probed device.
 * Initialises the lock, detach work/completion and counters, then parks
 * the device on the inactive list until CXGB4_STATE_UP attaches it.
 */
static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

/*
 * chcr_dev_move - retire a device from the active to the inactive list.
 * Advances (or clears) the round-robin cursor if it pointed at this
 * device, and drops the global device count.  Always returns 0.
 */
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	mutex_lock(&drv_data.drv_mutex);
	/* Step the cursor off the departing device before unlinking it. */
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

/*
 * cpl_fw6_pld_handler - process a CPL_FW6_PLD firmware response.
 *
 * Recovers the originating crypto_async_request pointer stashed in
 * fw6_pld->data[1], decodes the ack/error status word and completes the
 * request via chcr_handle_resp() (-EBADMSG on MAC/pad errors).  Returns
 * -EFAULT if the firmware handed back a NULL request pointer, else 0.
 * Bumps the adapter error counter when the completion reports failure.
 */
static int cpl_fw6_pld_handler(struct chcr_dev *dev,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;
	struct adapter *adap = padap(dev);

	fw6_pld = (struct cpl_fw6_pld *)input;
	/* data[1] carries the request cookie we placed in the work request. */
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						    fw6_pld->data[1]);

	/* Error bits live in the upper half of data[0] (big-endian). */
	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* call completion callback with failure status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

/* chcr_send_wr - hand a fully built work-request skb to the cxgb4 TX path. */
int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

/*
 * chcr_uld_add - cxgb4 ULD .add callback, invoked once per adapter.
 * Allocates and initialises a uld_ctx; returns it as the ULD handle, or
 * an ERR_PTR when lookaside crypto is unsupported or allocation fails.
 */
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	/* Only attach to adapters that advertise crypto lookaside support. */
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it in the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	/*
	 * NOTE(review): the lookaside check above reads lld->ulp_crypto but
	 * this reads lld->crypto for the same ULP_CRYPTO_* bit namespace —
	 * confirm against cxgb4_lld_info which field is intended here.
	 */
	if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
		chcr_add_xfrmops(lld);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
	return u_ctx;
}

/*
 * chcr_uld_rx_handler - cxgb4 ULD .rx_handler callback.
 *
 * Dispatches CPL_FW6_PLD responses through work_handlers[]; the payload
 * is either inline after the CPL header (no gather list) or in pgl->va.
 * Unsupported opcodes are logged and dropped.  Always returns 0.
 */
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (rpl->opcode != CPL_FW6_PLD) {
		pr_err("Unsupported opcode\n");
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](dev, pgl->va);
	return 0;
}

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
/* chcr_uld_tx_handler - ULD .tx_handler: route skbs to the inline-IPsec path. */
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	return chcr_ipsec_xmit(skb, dev);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

/*
 * chcr_detach_device - quiesce a device on CXGB4_STATE_DETACH.
 *
 * Marks the device CHCR_DETACH so no new work is accepted, then — if
 * requests are still in flight — arms detach_work_fn and blocks on
 * detach_comp until the device drains (or the retry budget expires).
 * Finally moves the context to the inactive list via chcr_dev_move().
 */
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detached Event received for already detach device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	/* Wait for outstanding requests; detach_work_fn signals detach_comp. */
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	// Move u_ctx to inactive_dev list
	chcr_dev_move(u_ctx);
}

/*
 * chcr_uld_state_change - cxgb4 ULD .state_change callback.
 * UP activates the device and starts the crypto service; DETACH quiesces
 * it.  RECOVERY/DOWN and unknown states are intentionally ignored.
 */
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			// Already initialised; nothing to do.
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

/*
 * chcr_crypto_init - module entry point.
 * Initialises the global device bookkeeping and registers this driver
 * with the cxgb4 core; per-adapter setup happens later via chcr_uld_add.
 */
static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

	return 0;
}

/*
 * chcr_crypto_exit - module exit point.
 * Stops the crypto service, unregisters from cxgb4, then frees every
 * context on both device lists, zeroing per-adapter stats first.
 */
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	stop_crypto();
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
	/* Remove all devices from list */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);