/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef __CHCR_CORE_H__
#define __CHCR_CORE_H__

#include <crypto/algapi.h>
#include <net/tls.h>
#include "t4_hw.h"
#include "cxgb4.h"
#include "t4_msg.h"
#include "cxgb4_uld.h"

#define DRV_MODULE_NAME "chcr"
#define DRV_VERSION "1.0.0.0-ko"
#define DRV_DESC "Chelsio T6 Crypto Co-processor Driver"

/* Cap on requests outstanding to the hardware at any one time. */
#define MAX_PENDING_REQ_TO_HW 20
/* Timeout for a test response; units not visible here (presumably ms) — confirm at use site. */
#define CHCR_TEST_RESPONSE_TIMEOUT 1000
/* Delay before the detach worker re-checks for quiesced traffic (jiffies). */
#define WQ_DETACH_TM	(msecs_to_jiffies(50))

/*
 * Bit positions of the pad-check and MAC-check error flags in a response
 * status word; CHK_*_ERR_BIT() extract the corresponding single bit.
 */
#define PAD_ERROR_BIT		1
#define CHK_PAD_ERR_BIT(x)	(((x) >> PAD_ERROR_BIT) & 1)

#define MAC_ERROR_BIT		0
#define CHK_MAC_ERR_BIT(x)	(((x) >> MAC_ERROR_BIT) & 1)
#define MAX_SALT		4

/* Minimum cipher work-request length: WR + DSGL CPL + SGL + 16 bytes of IV. */
#define CIP_WR_MIN_LEN (sizeof(struct chcr_wr) + \
			sizeof(struct cpl_rx_phys_dsgl) + \
			sizeof(struct ulptx_sgl) + 16) //IV

/* Minimum hash work-request length. */
#define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
			DUMMY_BYTES + \
			sizeof(struct ulptx_sgl))

struct uld_ctx;

/*
 * Key context as laid out for the hardware: a header word, the salt, the
 * IV-to-auth offsets word, then the variable-length key material.
 * Field order and sizes are part of the device interface — do not reorder.
 */
struct _key_ctx {
	__be32 ctx_hdr;
	u8 salt[MAX_SALT];
	__be64 iv_to_auth;
	unsigned char key[];	/* flexible array: raw key material follows */
};

/*
 * KEYCTX_TX_WR_* field accessors for the key-context TX work-request word.
 * Convention: *_S is the bit shift, *_M the field mask (applied after
 * shifting), *_V(x) places a value into the field, *_G(x) extracts it.
 * NOTE(review): *_V does not mask its argument, and for the high fields
 * (shift >= 32) the caller must supply a 64-bit operand to avoid shifting
 * past the width of int.
 */
#define KEYCTX_TX_WR_IV_S	55
#define KEYCTX_TX_WR_IV_M	0x1ffULL
#define KEYCTX_TX_WR_IV_V(x)	((x) << KEYCTX_TX_WR_IV_S)
#define KEYCTX_TX_WR_IV_G(x)	\
	(((x) >> KEYCTX_TX_WR_IV_S) & KEYCTX_TX_WR_IV_M)

#define KEYCTX_TX_WR_AAD_S	47
#define KEYCTX_TX_WR_AAD_M	0xffULL
#define KEYCTX_TX_WR_AAD_V(x)	((x) << KEYCTX_TX_WR_AAD_S)
#define KEYCTX_TX_WR_AAD_G(x)	(((x) >> KEYCTX_TX_WR_AAD_S) & \
				KEYCTX_TX_WR_AAD_M)

#define KEYCTX_TX_WR_AADST_S	39
#define KEYCTX_TX_WR_AADST_M	0xffULL
#define KEYCTX_TX_WR_AADST_V(x)	((x) << KEYCTX_TX_WR_AADST_S)
#define KEYCTX_TX_WR_AADST_G(x)	\
	(((x) >> KEYCTX_TX_WR_AADST_S) & KEYCTX_TX_WR_AADST_M)

#define KEYCTX_TX_WR_CIPHER_S	30
#define KEYCTX_TX_WR_CIPHER_M	0x1ffULL
#define KEYCTX_TX_WR_CIPHER_V(x)	((x) << KEYCTX_TX_WR_CIPHER_S)
#define KEYCTX_TX_WR_CIPHER_G(x)	\
	(((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)

#define KEYCTX_TX_WR_CIPHERST_S	23
#define KEYCTX_TX_WR_CIPHERST_M	0x7f
#define KEYCTX_TX_WR_CIPHERST_V(x)	((x) << KEYCTX_TX_WR_CIPHERST_S)
#define KEYCTX_TX_WR_CIPHERST_G(x)	\
	(((x) >> KEYCTX_TX_WR_CIPHERST_S) & KEYCTX_TX_WR_CIPHERST_M)

#define KEYCTX_TX_WR_AUTH_S	14
#define KEYCTX_TX_WR_AUTH_M	0x1ff
#define KEYCTX_TX_WR_AUTH_V(x)	((x) << KEYCTX_TX_WR_AUTH_S)
#define KEYCTX_TX_WR_AUTH_G(x)	\
	(((x) >> KEYCTX_TX_WR_AUTH_S) & KEYCTX_TX_WR_AUTH_M)

#define KEYCTX_TX_WR_AUTHST_S	7
#define KEYCTX_TX_WR_AUTHST_M	0x7f
#define KEYCTX_TX_WR_AUTHST_V(x)	((x) << KEYCTX_TX_WR_AUTHST_S)
#define KEYCTX_TX_WR_AUTHST_G(x)	\
	(((x) >> KEYCTX_TX_WR_AUTHST_S) & KEYCTX_TX_WR_AUTHST_M)

#define KEYCTX_TX_WR_AUTHIN_S	0
#define KEYCTX_TX_WR_AUTHIN_M	0x7f
#define KEYCTX_TX_WR_AUTHIN_V(x)	((x) << KEYCTX_TX_WR_AUTHIN_S)
#define KEYCTX_TX_WR_AUTHIN_G(x)	\
	(((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)

/* Number of retries the detach workqueue allows (see struct chcr_dev.wqretry). */
#define WQ_RETRY	5

/*
 * Driver-global bookkeeping: lists of active and inactive devices,
 * a count, and the lock protecting them. last_dev is presumably used by
 * assign_chcr_device() for round-robin device selection — confirm in the .c.
 */
struct chcr_driver_data {
	struct list_head act_dev;	/* devices available for crypto work */
	struct list_head inact_dev;	/* devices not currently usable */
	atomic_t dev_count;
	struct mutex drv_mutex;
	struct uld_ctx *last_dev;
};

/* Lifecycle states of a chcr device. */
enum chcr_state {
	CHCR_INIT = 0,
	CHCR_ATTACH,
	CHCR_DETACH,
};

/*
 * Layout of a crypto work request as sent to the hardware.
 * Field order mirrors the wire format — do not reorder.
 */
struct chcr_wr {
	struct fw_crypto_lookaside_wr wreq;
	struct ulp_txpkt ulptx;
	struct ulptx_idata sc_imm;
	struct cpl_tx_sec_pdu sec_cpl;
	struct _key_ctx key_ctx;
};

/* Per-device driver state. */
struct chcr_dev {
	spinlock_t lock_chcr_dev;
	enum chcr_state state;
	atomic_t inflight;		/* requests currently outstanding */
	int wqretry;			/* detach-work retries remaining (starts at WQ_RETRY) */
	struct delayed_work detach_work;
	struct completion detach_comp;	/* signalled when detach may proceed */
};

/* Upper-layer-driver context: ties cxgb4 LLD info to the chcr device state. */
struct uld_ctx {
	struct list_head entry;		/* linkage on act_dev/inact_dev lists */
	struct cxgb4_lld_info lldi;
	struct chcr_dev dev;
};

/* Opaque per-skb header used to stash DMA addresses for each fragment. */
struct sge_opaque_hdr {
	void *dev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * IPsec request body (everything after the ULPTX WR header).
 * Wire-format layout — do not reorder.
 */
struct chcr_ipsec_req {
	struct ulp_txpkt ulptx;
	struct ulptx_idata sc_imm;
	struct cpl_tx_sec_pdu sec_cpl;
	struct _key_ctx key_ctx;
};

/* Full IPsec work request: ULPTX WR header followed by the request body. */
struct chcr_ipsec_wr {
	struct fw_ulptx_wr wreq;
	struct chcr_ipsec_req req;
};

/* Byte offset at which the IV is inserted when ESN is in use. */
#define ESN_IV_INSERT_OFFSET 12

/* AAD + IV block for ESN-enabled IPsec: SPI, 64-bit sequence number, IV. */
struct chcr_ipsec_aadiv {
	__be32 spi;
	u8 seq_no[8];
	u8 iv[8];
};

/* Cached security-association parameters for an IPsec offload state. */
struct ipsec_sa_entry {
	int hmac_ctrl;
	u16 esn;			/* non-zero if extended sequence numbers are enabled */
	u16 resv;
	unsigned int enckey_len;
	unsigned int kctx_len;
	unsigned int authsize;
	__be32 key_ctx_hdr;
	char salt[MAX_SALT];
	char key[2 * AES_MAX_KEY_SIZE];
};

/*
 *      sgl_len - calculates the size of an SGL of the given capacity
 *      @n: the number of SGL entries
 *      Calculates the number of flits needed for a scatter/gather list that
 *      can hold the given number of entries.
 *
 *      NOTE(review): assumes n >= 1; the n-- below wraps for n == 0.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/*
 * padap - return the driver-private data attached to the PCI device that
 * backs @dev (reached via the enclosing uld_ctx's LLD info). Callers treat
 * this as the adapter pointer.
 */
static inline void *padap(struct chcr_dev *dev)
{
	struct uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev);

	return pci_get_drvdata(u_ctx->lldi.pdev);
}

/* Core driver entry points (defined in the corresponding .c files). */
struct uld_ctx *assign_chcr_device(void);
int chcr_send_wr(struct sk_buff *skb);
int start_crypto(void);
int stop_crypto(void);
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl);
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err);
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
#ifdef CONFIG_CHELSIO_TLS_DEVICE
/* kTLS offload hooks, compiled in only with CONFIG_CHELSIO_TLS_DEVICE. */
int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
extern int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
			     enum tls_offload_ctx_dir direction,
			     struct tls_crypto_info *crypto_info,
			     u32 start_offload_tcp_sn);
extern void chcr_ktls_dev_del(struct net_device *netdev,
			      struct tls_context *tls_ctx,
			      enum tls_offload_ctx_dir direction);
#endif
#endif /* __CHCR_CORE_H__ */