/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 *	Manoj Malviya (manojmalviya@chelsio.com)
 *	Atul Gupta (atul.gupta@chelsio.com)
 *	Jitendra Lulla (jlulla@chelsio.com)
 *	Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 *	Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);
static atomic_t dev_count;

typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

/* CPL reply handlers, indexed by CPL opcode */
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
};

/* Pick a device for a new session. */
int assign_chcr_device(struct chcr_dev **dev)
{
	struct uld_ctx *u_ctx;

	/*
	 * TODO: decide which device to use when several are available,
	 * e.g. by round robin. All requests of one session must go to
	 * the same device to maintain ordering.
	 */
	mutex_lock(&dev_mutex);
	u_ctx = list_first_entry_or_null(&uld_ctx_list, struct uld_ctx,
					 entry);
	if (!u_ctx) {
		mutex_unlock(&dev_mutex);
		return -ENXIO;
	}

	*dev = u_ctx->dev;
	mutex_unlock(&dev_mutex);
	return 0;
}

static int chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	spin_lock_init(&dev->lock_chcr_dev);
	u_ctx->dev = dev;
	dev->u_ctx = u_ctx;
	atomic_inc(&dev_count);
	return 0;
}

static int chcr_dev_remove(struct uld_ctx *u_ctx)
{
	kfree(u_ctx->dev);
	u_ctx->dev = NULL;
	atomic_dec(&dev_count);
	return 0;
}

/*
 * Handle a CPL_FW6_PLD completion: recover the crypto request pointer
 * echoed back in the message payload and run its completion callback.
 */
static int cpl_fw6_pld_handler(struct chcr_dev *dev,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						    fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (ack_err_status) {
		if (CHK_MAC_ERR_BIT(ack_err_status) ||
		    CHK_PAD_ERR_BIT(ack_err_status))
			error_status = -EINVAL;
	}
	/* call completion callback with failure status */
	if (req) {
		if (!chcr_handle_resp(req, input, error_status))
			req->complete(req, error_status);
		else
			return -EINVAL;
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	return 0;
}

int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_ofld_send(skb->dev, skb);
}

static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	/* Create the device and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	mutex_lock(&dev_mutex);
	list_add_tail(&u_ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);
out:
	return u_ctx;
}

/* Dispatch an ingress CPL message to its handler. */
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = u_ctx->dev;
	const struct cpl_act_establish *rpl =
		(const struct cpl_act_establish *)rsp;

	if (rpl->ot.opcode != CPL_FW6_PLD) {
		pr_err("Unsupported opcode\n");
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->ot.opcode](dev, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->ot.opcode](dev, pgl->va);
	return 0;
}

static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (!u_ctx->dev) {
			ret = chcr_dev_add(u_ctx);
			if (ret != 0)
				return ret;
		}
		if (atomic_read(&dev_count) == 1)
			ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		if (u_ctx->dev) {
			mutex_lock(&dev_mutex);
			chcr_dev_remove(u_ctx);
			mutex_unlock(&dev_mutex);
		}
		if (!atomic_read(&dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

static int __init chcr_crypto_init(void)
{
	if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
		return -ENODEV;
	}

	return 0;
}

static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;

	if (atomic_read(&dev_count))
		stop_crypto();

	/* Remove all devices from the list */
	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
		if (u_ctx->dev)
			chcr_dev_remove(u_ctx);
		kfree(u_ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);