/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from fw config file */
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};

static void detach_work_fn(struct work_struct *work)
{
	struct chcr_dev *dev;

	dev = container_of(work, struct chcr_dev, detach_work.work);

	if (atomic_read(&dev->inflight)) {
		dev->wqretry--;
		if (dev->wqretry) {
			pr_debug("Request Inflight Count %d\n",
				 atomic_read(&dev->inflight));

			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		} else {
			WARN(1, "CHCR: %d requests still pending\n",
			     atomic_read(&dev->inflight));
			complete(&dev->detach_comp);
		}
	} else {
		complete(&dev->detach_comp);
	}
}

struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select the
	 * device for crypto operations in a round-robin fashion. A given
	 * session, however, must keep using the same device so that
	 * request-response ordering is maintained.
	 */
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}
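
/*
 * Usage sketch (illustrative only, not part of this file): a consumer in
 * the algorithm layer would typically pin a device once per session and
 * cache it, so every request of that session hits the same adapter. The
 * "ctx" layout below is a hypothetical placeholder; the real caller lives
 * in chcr_algo.c.
 *
 *	struct uld_ctx *u_ctx = assign_chcr_device();
 *
 *	if (!u_ctx)
 *		return -ENXIO;		// no active crypto device
 *	ctx->dev = &u_ctx->dev;		// reuse for the whole session
 */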

static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	/*
	 * A new device starts out on the inactive list. drv_data.last_dev
	 * must only be set once the device becomes active (chcr_dev_add());
	 * otherwise assign_chcr_device() could hand out an inactive device.
	 */
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	mutex_unlock(&drv_data.drv_mutex);
}

static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	struct adapter *adap;

	mutex_lock(&drv_data.drv_mutex);
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	adap = padap(&u_ctx->dev);
	memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

static int cpl_fw6_pld_handler(struct chcr_dev *dev,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;
	struct adapter *adap = padap(dev);

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
							fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* call completion callback with failure status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}
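
/*
 * Transmit sketch (an assumption based on the algorithm layer, not taken
 * from this file): a caller that has built a firmware work request in an
 * skb is expected to bind it to one of the adapter's ports before handing
 * it to chcr_send_wr(), e.g.:
 *
 *	skb->dev = u_ctx->lldi.ports[0];
 *	err = chcr_send_wr(skb);	// queued on the crypto TX channel
 */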

static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	/* Bail out if the adapter does not support lookaside crypto */
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it in the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
		chcr_add_xfrmops(lld);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
out:
	return u_ctx;
}

int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (rpl->opcode != CPL_FW6_PLD) {
		pr_err("Unsupported opcode\n");
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](dev, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](dev, pgl->va);
	return 0;
}

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	return chcr_ipsec_xmit(skb, dev);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detach event received for an already detached device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	/* Move u_ctx to the inactive device list */
	chcr_dev_move(u_ctx);
}

static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			/* Already initialised */
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

	return 0;
}

static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;

	stop_crypto();

	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
	/* Remove all devices from the list */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
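
/*
 * Device lifecycle, as driven by the cxgb4 ULD callbacks above:
 *
 *	chcr_uld_add()		device allocated; chcr_dev_init() puts it
 *				in CHCR_INIT on the inactive list
 *	CXGB4_STATE_UP		chcr_dev_add() moves it to CHCR_ATTACH on
 *				the active list; start_crypto() registers
 *				the crypto algorithms
 *	CXGB4_STATE_DETACH	chcr_detach_device() marks it CHCR_DETACH,
 *				drains in-flight requests via detach_work,
 *				then chcr_dev_move() parks it back on the
 *				inactive list
 */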