/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Atul Gupta (atul.gupta@chelsio.com)
 */

#define pr_fmt(fmt) "ch_ipsec: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_ipsec.h"

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE 8

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
				   struct netlink_ext_ack *extack);

static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add      = ch_ipsec_xfrm_add_state,
	.xdo_dev_state_delete   = ch_ipsec_xfrm_del_state,
	.xdo_dev_state_free     = ch_ipsec_xfrm_free_state,
	.xdo_dev_offload_ok     = ch_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};

static struct cxgb4_uld_info ch_ipsec_uld_info = {
	.name = CHIPSEC_DRV_MODULE_NAME,
	.add = ch_ipsec_uld_add,
	.state_change = ch_ipsec_uld_state_change,
	.tx_handler = ch_ipsec_xmit,
	.xfrmdev_ops = &ch_ipsec_xfrmdev_ops,
};
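/*
 * ULD plumbing: cxgb4 invokes ->add() once per adapter to hand over the
 * lower-level driver info, and ->state_change() as the adapter comes up
 * or goes down; per-adapter contexts live on uld_ctx_list under
 * dev_mutex.
 */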
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
{
	struct ipsec_uld_ctx *u_ctx;

	pr_info_once("%s - version %s\n", CHIPSEC_DRV_DESC,
		     CHIPSEC_DRV_VERSION);
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *infop;
out:
	return u_ctx;
}

static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct ipsec_uld_ctx *u_ctx = handle;

	pr_debug("new_state %u\n", new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
		mutex_lock(&dev_mutex);
		list_add_tail(&u_ctx->entry, &uld_ctx_list);
		mutex_unlock(&dev_mutex);
		break;
	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	case CXGB4_STATE_DETACH:
		pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
		list_del(&u_ctx->entry);
		break;
	default:
		break;
	}

	return 0;
}

static int ch_ipsec_setauthsize(struct xfrm_state *x,
				struct ipsec_sa_entry *sa_entry)
{
	int hmac_ctrl;
	int authsize = x->aead->alg_icv_len / 8;

	sa_entry->authsize = authsize;

	switch (authsize) {
	case ICV_8:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		break;
	case ICV_12:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		break;
	case ICV_16:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		break;
	default:
		return -EINVAL;
	}
	return hmac_ctrl;
}
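/*
 * Hardware key-context layout for AES-GCM: the RFC 4106 AEAD key carries
 * the cipher key followed by a 4-byte nonce salt (e.g. a 160-bit alg_key
 * is a 16-byte AES-128 key plus the 4-byte salt).  The GHASH subkey
 * H = AES(K, 0^16) is computed in software below and appended after the
 * 16-byte-aligned cipher key, since both go into the key context that is
 * later copied into the work request.
 */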
static int ch_ipsec_setkey(struct xfrm_state *x,
			   struct ipsec_sa_entry *sa_entry)
{
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	struct crypto_aes_ctx aes;
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		       ((DIV_ROUND_UP(keylen, 16)) << 4) +
		       AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));

	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) * 16),
	       ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			     AEAD_H_SIZE;
out:
	return ret;
}

/*
 * ch_ipsec_xfrm_add_state
 * returns 0 on success, negative error if failed to send message to FPGA,
 * positive error if FPGA returned a bad response
 */
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
				   struct netlink_ext_ack *extack)
{
	struct ipsec_sa_entry *sa_entry;
	int res = 0;

	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm state offloaded");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm offload");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state offloaded");
		return -EINVAL;
	}
	if (x->encap) {
		NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state not offloaded");
		return -EINVAL;
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128 &&
	    x->aead->alg_icv_len != 96) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 96b & 128b");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}
	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload");
		return -EINVAL;
	}

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		res = -ENOMEM;
		goto out;
	}

	sa_entry->hmac_ctrl = ch_ipsec_setauthsize(x, sa_entry);
	if (x->props.flags & XFRM_STATE_ESN)
		sa_entry->esn = 1;
	ch_ipsec_setkey(x, sa_entry);
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
out:
	return res;
}
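/*
 * State teardown is two-phase: ->xdo_dev_state_delete is a no-op here
 * because nothing is programmed into the hardware per state; the SA
 * entry hung off xso.offload_handle is only freed (and the module
 * reference dropped) in ->xdo_dev_state_free.
 */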
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kfree(sa_entry);
	module_put(THIS_MODULE);
}

static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IP options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}
	return true;
}

static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

static int is_eth_imm(const struct sk_buff *skb,
		      struct ipsec_sa_entry *sa_entry)
{
	unsigned int kctx_len;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = sizeof(struct fw_ulptx_wr) +
		 sizeof(struct chcr_ipsec_req) + kctx_len;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (sa_entry->esn)
		hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
			   << 4);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}

static unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
				      struct ipsec_sa_entry *sa_entry,
				      bool *immediate)
{
	unsigned int kctx_len;
	unsigned int flits;
	int aadivlen;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = is_eth_imm(skb, sa_entry);
	aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
						16) : 0;
	aadivlen <<= 4;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data. In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen) {
		*immediate = true;
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
	}

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments. We also include the flits necessary
	 * for the TX Packet Work Request and CPL. We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits += (sizeof(struct fw_ulptx_wr) +
		  sizeof(struct chcr_ipsec_req) +
		  kctx_len +
		  sizeof(struct cpl_tx_pkt_core) +
		  aadivlen) / sizeof(__be64);
	return flits;
}
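/*
 * Sizing note for the helpers above: a "flit" is one 8-byte (__be64)
 * beat on the SGE, and one Tx descriptor holds 64 bytes, i.e. 8 flits;
 * hence the DIV_ROUND_UP(n, 8) in flits_to_desc() below and the
 * "64 * q->q.size" bytes of total ring space in copy_key_cpltx_pktxt().
 */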
static void *copy_esn_pktxt(struct sk_buff *skb,
			    struct net_device *dev,
			    void *pos,
			    struct ipsec_sa_entry *sa_entry)
{
	struct chcr_ipsec_aadiv *aadiv;
	struct ulptx_idata *sc_imm;
	struct ip_esp_hdr *esphdr;
	struct xfrm_offload *xo;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	__be64 seqno;
	u32 qidx;
	u32 seqlo;
	u8 *iv;
	int eoq;
	int len;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	if (!eoq)
		pos = q->q.desc;

	len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
	memset(pos, 0, len);
	aadiv = (struct chcr_ipsec_aadiv *)pos;
	esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
	xo = xfrm_offload(skb);

	aadiv->spi = esphdr->spi;
	seqlo = ntohl(esphdr->seq_no);
	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
	memcpy(aadiv->seq_no, &seqno, 8);
	memcpy(aadiv->iv, iv, 8);

	if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
		sc_imm = (struct ulptx_idata *)(pos +
			 (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
				       sizeof(__be64)) << 3));
		sc_imm->cmd_more = FILL_CMD_MORE(0);
		sc_imm->len = cpu_to_be32(skb->len);
	}
	pos += len;
	return pos;
}

static void *copy_cpltx_pktxt(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      struct ipsec_sa_entry *sa_entry)
{
	struct cpl_tx_pkt_core *cpl;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	u32 ctrl0, qidx;
	u64 cntrl = 0;
	int left;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	left = (void *)q->q.stat - pos;
	if (!left)
		pos = q->q.desc;

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);
	/* Copy ESN info for HW */
	if (sa_entry->esn)
		pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
	return pos;
}
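/*
 * The Tx descriptor ring is linear memory between q->q.desc and
 * q->q.stat, so each copy helper must handle the write position hitting
 * the end of the ring; the key context below is the one piece that may
 * be split, with its tail wrapping back to q->q.desc.
 */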
static void *copy_key_cpltx_pktxt(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  struct ipsec_sa_entry *sa_entry)
{
	struct _key_ctx *key_ctx;
	int left, eoq, key_len;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	unsigned int qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
	key_len = sa_entry->kctx_len;

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	left = eoq;
	if (!eoq) {
		pos = q->q.desc;
		left = 64 * q->q.size;
	}

	/* Copy the Key context header */
	key_ctx = (struct _key_ctx *)pos;
	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
	pos += sizeof(struct _key_ctx);
	left -= sizeof(struct _key_ctx);

	if (likely(key_len <= left)) {
		memcpy(key_ctx->key, sa_entry->key, key_len);
		pos += key_len;
	} else {
		memcpy(pos, sa_entry->key, left);
		memcpy(q->q.desc, sa_entry->key + left,
		       key_len - left);
		pos = (u8 *)q->q.desc + (key_len - left);
	}
	/* Copy CPL TX PKT XT */
	pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
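/*
 * Build the crypto work request in place: FW_ULPTX_WR, then ULP_TXPKT
 * plus an immediate-data sub-command, then CPL_TX_SEC_PDU describing
 * where the AAD, IV and ciphertext start within the payload, followed
 * by the key context, CPL_TX_PKT_CORE and, for ESN, the AAD/IV block.
 */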
static void *ch_ipsec_crypto_wreq(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  int credits,
				  struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	bool immediate = false;
	u16 immdatalen = 0;
	unsigned int flits;
	u32 ivinoffset;
	u32 aadstart;
	u32 aadstop;
	u32 ciphstart;
	u16 sc_more = 0;
	u32 ivdrop = 0;
	u32 esnlen = 0;
	u32 wr_mid;
	u16 ndesc;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = DIV_ROUND_UP(flits, 2);
	if (sa_entry->esn)
		ivdrop = 1;

	if (immediate)
		immdatalen = skb->len;

	if (sa_entry->esn) {
		esnlen = sizeof(struct chcr_ipsec_aadiv);
		if (!skb_is_nonlinear(skb))
			sc_more = 1;
	}

	/* WR Header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		if (!q->dbqt)
			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(ndesc - 1);

	/* Sub-command */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 esnlen +
					 (esnlen ? 0 : immdatalen));

	/* CPL_SEC_PDU */
	ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
				     (skb_transport_offset(skb) +
				      sizeof(struct ip_esp_hdr) + 1);
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
				CPL_TX_SEC_PDU_CPLLEN_V(2) |
				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
				CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
	aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
	aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
				  (skb_transport_offset(skb) +
				   sizeof(struct ip_esp_hdr));
	ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
		    GCM_ESP_IV_SIZE + 1;
	ciphstart += sa_entry->esn ? esnlen : 0;

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
							aadstart,
							aadstop,
							ciphstart, 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
					sa_entry->authsize,
					sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								 0, ivdrop, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static unsigned int flits_to_desc(unsigned int n)
{
	WARN_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

static unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

/*
 * ch_ipsec_xmit called from ULD Tx handler
 */
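/*
 * Flow: reclaim completed descriptors, size the work request, return
 * NETDEV_TX_BUSY if the ring is full, build the WR in the ring (handling
 * wrap at q->q.stat), then either inline the packet or attach it as a
 * scatter-gather list, advance the pidx and ring the doorbell.
 */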
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	unsigned int last_desc, ndesc, flits = 0;
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	struct tx_sw_desc *sgl_sdesc;
	int qidx, left, credits;
	bool immediate = false;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	struct sec_path *sp;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

	sp = skb_sec_path(skb);
	if (sp->len != 1) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;
	/* Setup IPSec CPL */
	pos = (void *)ch_ipsec_crypto_wreq(skb, dev, (void *)pos,
					   credits, sa_entry);
	if (before > (u64 *)pos) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}

static int __init ch_ipsec_init(void)
{
	cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);

	return 0;
}

static void __exit ch_ipsec_exit(void)
{
	struct ipsec_uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
		adap = pci_get_drvdata(u_ctx->lldi.pdev);
		atomic_set(&adap->ch_ipsec_stats.ipsec_cnt, 0);
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_IPSEC);
}

module_init(ch_ipsec_init);
module_exit(ch_ipsec_exit);

MODULE_DESCRIPTION("Crypto IPSEC for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(CHIPSEC_DRV_VERSION);