/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static atomic_t dev_count;

typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

/* Per-opcode CPL response handlers; only CPL_FW6_PLD is expected here. */
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
        [CPL_FW6_PLD] = cpl_fw6_pld_handler,
};

static struct cxgb4_uld_info chcr_uld_info = {
        .name = DRV_MODULE_NAME,
        .nrxq = MAX_ULD_QSETS,
        .ntxq = MAX_ULD_QSETS,
        .rxq_size = 1024,
        .add = chcr_uld_add,
        .state_change = chcr_uld_state_change,
        .rx_handler = chcr_uld_rx_handler,
};

int assign_chcr_device(struct chcr_dev **dev)
{
        struct uld_ctx *u_ctx;

        /*
         * Which device to use if multiple devices are available TODO
         * Maybe select the device based on round robin. One session
         * must go to the same device to maintain the ordering.
         */
        mutex_lock(&dev_mutex); /* TODO ? */
        u_ctx = list_first_entry_or_null(&uld_ctx_list, struct uld_ctx, entry);
        if (!u_ctx) {
                mutex_unlock(&dev_mutex);
                return -ENXIO;
        }

        *dev = u_ctx->dev;
        mutex_unlock(&dev_mutex);
        return 0;
}

static int chcr_dev_add(struct uld_ctx *u_ctx)
{
        struct chcr_dev *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENXIO;

        spin_lock_init(&dev->lock_chcr_dev);
        u_ctx->dev = dev;
        dev->u_ctx = u_ctx;
        atomic_inc(&dev_count);
        return 0;
}

static int chcr_dev_remove(struct uld_ctx *u_ctx)
{
        kfree(u_ctx->dev);
        u_ctx->dev = NULL;
        atomic_dec(&dev_count);
        return 0;
}

static int cpl_fw6_pld_handler(struct chcr_dev *dev,
                               unsigned char *input)
{
        struct crypto_async_request *req;
        struct cpl_fw6_pld *fw6_pld;
        u32 ack_err_status = 0;
        int error_status = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        /* data[1] echoes back the originating crypto_async_request pointer */
        req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
                                                        fw6_pld->data[1]);

        ack_err_status =
                ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
        if (ack_err_status) {
                if (CHK_MAC_ERR_BIT(ack_err_status) ||
                    CHK_PAD_ERR_BIT(ack_err_status))
                        error_status = -EBADMSG;
        }
        /* call completion callback with failure status */
        if (req) {
                error_status = chcr_handle_resp(req, input, error_status);
                req->complete(req, error_status);
        } else {
                pr_err("Incorrect request address from the firmware\n");
                return -EFAULT;
        }
        return 0;
}

int chcr_send_wr(struct sk_buff *skb)
{
        return cxgb4_crypto_send(skb->dev, skb);
}

static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
        struct uld_ctx *u_ctx;

        /* Create the device and add it in the device list */
        u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
        if (!u_ctx) {
                u_ctx = ERR_PTR(-ENOMEM);
                goto out;
        }
        u_ctx->lldi = *lld;
        mutex_lock(&dev_mutex);
        list_add_tail(&u_ctx->entry, &uld_ctx_list);
        mutex_unlock(&dev_mutex);
out:
        return u_ctx;
}

int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
                        const struct pkt_gl *pgl)
{
        struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
        struct chcr_dev *dev = u_ctx->dev;
        const struct cpl_act_establish *rpl =
                (const struct cpl_act_establish *)rsp;

        if (rpl->ot.opcode != CPL_FW6_PLD) {
                pr_err("Unsupported opcode\n");
                return 0;
        }

        if (!pgl)
                work_handlers[rpl->ot.opcode](dev, (unsigned char *)&rsp[1]);
        else
                work_handlers[rpl->ot.opcode](dev, pgl->va);
        return 0;
}

static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
        struct uld_ctx *u_ctx = handle;
        int ret = 0;

        switch (state) {
        case CXGB4_STATE_UP:
                if (!u_ctx->dev) {
                        ret = chcr_dev_add(u_ctx);
                        if (ret != 0)
                                return ret;
                }
                if (atomic_read(&dev_count) == 1)
                        ret = start_crypto();
                break;

        case CXGB4_STATE_DETACH:
                if (u_ctx->dev) {
                        mutex_lock(&dev_mutex);
                        chcr_dev_remove(u_ctx);
                        mutex_unlock(&dev_mutex);
                }
                if (!atomic_read(&dev_count))
                        stop_crypto();
                break;

        case CXGB4_STATE_START_RECOVERY:
        case CXGB4_STATE_DOWN:
        default:
                break;
        }
        return ret;
}

static int __init chcr_crypto_init(void)
{
        if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
                pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
                return -1;
        }

        return 0;
}

static void __exit chcr_crypto_exit(void)
{
        struct uld_ctx *u_ctx, *tmp;

        if (atomic_read(&dev_count))
                stop_crypto();

        /* Remove all devices from list */
        mutex_lock(&dev_mutex);
        list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
                if (u_ctx->dev)
                        chcr_dev_remove(u_ctx);
                kfree(u_ctx);
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);