/**
 * This file is part of the Chelsio T4/T5/T6 crypto driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	[CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
	[CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
#endif
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from fw config file */
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
};

static void detach_work_fn(struct work_struct *work)
{
	struct chcr_dev *dev;

	dev = container_of(work, struct chcr_dev, detach_work.work);

	if (atomic_read(&dev->inflight)) {
		dev->wqretry--;
		if (dev->wqretry) {
			pr_debug("Request Inflight Count %d\n",
				 atomic_read(&dev->inflight));

			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		} else {
			WARN(1, "CHCR:%d request Still Pending\n",
			     atomic_read(&dev->inflight));
			complete(&dev->detach_comp);
		}
	} else {
		complete(&dev->detach_comp);
	}
}
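/**
 * assign_chcr_device - select a device for a new crypto session.
 *
 * Returns the next entry from the active-device list in round-robin
 * order, or NULL if no device is currently active. A session is
 * expected to keep using the device returned here; see the comment
 * below on request-response ordering.
 */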
struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations. Note
	 * that a single session must keep using the same device to
	 * maintain request-response ordering.
	 */
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}

static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	mutex_unlock(&drv_data.drv_mutex);
}

static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	mutex_lock(&drv_data.drv_mutex);
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

static int cpl_fw6_pld_handler(struct adapter *adap,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
							fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* call completion callback with failure status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}
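/*
 * ULD "add" callback, invoked by cxgb4 for each adapter. It bails out
 * unless the firmware advertises lookaside crypto support, otherwise it
 * allocates and initialises the per-adapter context and, depending on
 * the advertised capabilities, also enables the inline IPsec and KTLS
 * offload paths.
 */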
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	/* Bail out if the adapter lacks lookaside crypto support */
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	if (lld->ulp_crypto & ULP_CRYPTO_IPSEC_INLINE)
		chcr_add_xfrmops(lld);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
		chcr_enable_ktls(padap(&u_ctx->dev));
#endif
out:
	return u_ctx;
}

int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	struct adapter *adap = padap(dev);
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (!work_handlers[rpl->opcode]) {
		pr_err("Unsupported opcode %d received\n", rpl->opcode);
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](adap, pgl->va);
	return 0;
}

#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	/* If the skb's decrypted bit is set, this is a NIC TLS packet;
	 * otherwise it is an IPsec packet.
	 */
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (skb->decrypted)
		return chcr_ktls_xmit(skb, dev);
#endif
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	return chcr_ipsec_xmit(skb, dev);
#endif
	return 0;
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */

static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detach event received for an already detached device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	/* Move u_ctx to the inactive-device list */
	chcr_dev_move(u_ctx);
}
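/*
 * ULD state-change callback. CXGB4_STATE_UP moves the device onto the
 * active list and starts the crypto service; CXGB4_STATE_DETACH waits
 * for in-flight requests to drain and moves the device back to the
 * inactive list.
 */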
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			/* Already initialised. */
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

	return 0;
}

static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	stop_crypto();
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
	/* Remove all devices from the lists */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);