/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	[CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
	[CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
#endif
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from fw config file */
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
};

static void detach_work_fn(struct work_struct *work)
{
	struct chcr_dev *dev;

	dev = container_of(work, struct chcr_dev, detach_work.work);

	if (atomic_read(&dev->inflight)) {
		dev->wqretry--;
		if (dev->wqretry) {
			pr_debug("Request inflight count %d\n",
				 atomic_read(&dev->inflight));

			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		} else {
			WARN(1, "CHCR: %d requests still pending\n",
			     atomic_read(&dev->inflight));
			complete(&dev->detach_comp);
		}
	} else {
		complete(&dev->detach_comp);
	}
}

struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations. A single
	 * session, however, must keep using the same device to maintain
	 * request-response ordering.
	 */
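	/* drv_mutex protects both act_dev and the last_dev cursor */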
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev =
				list_first_entry(&drv_data.act_dev,
						 struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}

static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	mutex_lock(&drv_data.drv_mutex);
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev =
				list_first_entry(&drv_data.act_dev,
						 struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

static int cpl_fw6_pld_handler(struct adapter *adap,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
							fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* Call the completion callback, passing along any MAC/pad error */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	/* The adapter must support crypto lookaside */
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
		chcr_add_xfrmops(lld);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
		chcr_enable_ktls(padap(&u_ctx->dev));
#endif
out:
	return u_ctx;
}

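/*
 * Receive path: a response arrives as a CPL message; dispatch on its
 * opcode through work_handlers[]. The payload is either inline in the
 * response descriptor (&rsp[1]) or in the packet gather list (pgl->va).
 */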
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	struct adapter *adap = padap(dev);
	const struct cpl_fw6_pld *rpl = (const struct cpl_fw6_pld *)rsp;

	if (!work_handlers[rpl->opcode]) {
		pr_err("Unsupported opcode %d received\n", rpl->opcode);
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](adap, pgl->va);
	return 0;
}

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	return chcr_ipsec_xmit(skb, dev);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detach event received for an already detached device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	/* Move u_ctx to the inactive device list */
	chcr_dev_move(u_ctx);
}

static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			/* Already initialised */
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

	return 0;
}

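/*
 * Module unload: stop accepting new requests, unregister from cxgb4 so
 * no further ULD callbacks arrive, then free every context left on the
 * active and inactive device lists.
 */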
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	stop_crypto();
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
	/* Remove all devices from list */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);