/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Atul Gupta (atul.gupta@chelsio.com)
 */

#define pr_fmt(fmt) "ch_ipsec: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_ipsec.h"

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE 8

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);

static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
	.xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
	.xdo_dev_state_free = ch_ipsec_xfrm_free_state,
	.xdo_dev_offload_ok = ch_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};

static struct cxgb4_uld_info ch_ipsec_uld_info = {
	.name = CHIPSEC_DRV_MODULE_NAME,
	.add = ch_ipsec_uld_add,
	.state_change = ch_ipsec_uld_state_change,
	.tx_handler = ch_ipsec_xmit,
	.xfrmdev_ops = &ch_ipsec_xfrmdev_ops,
};

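/*
 * ch_ipsec_uld_add - allocate the per-adapter upper-layer driver context.
 * cxgb4 calls this while attaching the IPsec ULD to an adapter; the
 * returned context is handed back as @handle in the other ULD callbacks.
 */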
static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
{
	struct ipsec_uld_ctx *u_ctx;

	pr_info_once("%s - version %s\n", CHIPSEC_DRV_DESC,
		     CHIPSEC_DRV_VERSION);
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *infop;
out:
	return u_ctx;
}

static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct ipsec_uld_ctx *u_ctx = handle;

	pr_debug("new_state %u\n", new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
		mutex_lock(&dev_mutex);
		list_add_tail(&u_ctx->entry, &uld_ctx_list);
		mutex_unlock(&dev_mutex);
		break;
	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	case CXGB4_STATE_DETACH:
		pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
		/* Take dev_mutex so we do not race with module unload
		 * walking uld_ctx_list.
		 */
		mutex_lock(&dev_mutex);
		list_del(&u_ctx->entry);
		mutex_unlock(&dev_mutex);
		break;
	default:
		break;
	}

	return 0;
}

static int ch_ipsec_setauthsize(struct xfrm_state *x,
				struct ipsec_sa_entry *sa_entry)
{
	int hmac_ctrl;
	int authsize = x->aead->alg_icv_len / 8;

	sa_entry->authsize = authsize;

	switch (authsize) {
	case ICV_8:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		break;
	case ICV_12:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		break;
	case ICV_16:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		break;
	default:
		return -EINVAL;
	}
	return hmac_ctrl;
}

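/*
 * ch_ipsec_setkey - build the GCM key context for @sa_entry.
 * The last four bytes of the AEAD key carry the nonce salt; the rest is
 * the AES key.  The hash subkey H = CIPH(K, 0^128) is precomputed in
 * software and stored after the (16-byte aligned) cipher key so the whole
 * key context can later be copied into the hardware work request.
 */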
static int ch_ipsec_setkey(struct xfrm_state *x,
			   struct ipsec_sa_entry *sa_entry)
{
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	struct crypto_aes_ctx aes;
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		       ((DIV_ROUND_UP(keylen, 16)) << 4) +
		       AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));

	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
	       16), ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			     AEAD_H_SIZE;
out:
	return ret;
}

/*
 * ch_ipsec_xfrm_add_state
 * returns 0 on success, negative error otherwise.  Nothing is sent to the
 * hardware here; the SA parameters are only validated and cached.
 */
static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;
	int res = 0;

	if (x->props.aalgo != SADB_AALG_NONE) {
		pr_debug("Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		pr_debug("Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		pr_debug("Only IPv4/6 xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		pr_debug("Only transport and tunnel xfrm offload\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		pr_debug("Only ESP xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		pr_debug("Encapsulated xfrm state not offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		pr_debug("Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128 &&
	    x->aead->alg_icv_len != 96) {
		pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		pr_debug("Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		pr_debug("Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		pr_debug("Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}
	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
		pr_debug("Unsupported xfrm offload\n");
		return -EINVAL;
	}

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		res = -ENOMEM;
		goto out;
	}

	/* ICV length was validated above, but be defensive */
	res = ch_ipsec_setauthsize(x, sa_entry);
	if (res < 0)
		goto out_free;
	sa_entry->hmac_ctrl = res;
	if (x->props.flags & XFRM_STATE_ESN)
		sa_entry->esn = 1;
	res = ch_ipsec_setkey(x, sa_entry);
	if (res)
		goto out_free;
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
	return 0;

out_free:
	kfree(sa_entry);
out:
	return res;
}

static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kfree(sa_entry);
	module_put(THIS_MODULE);
}

static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IP options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}
	return true;
}

static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

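/*
 * is_eth_imm - can this packet be sent as immediate data?
 *
 * Returns the total work-request header length (everything that precedes
 * the packet data in the WR) if the packet is small enough to be inlined
 * into the WR, and 0 if it must be carried by a scatter/gather list
 * instead.
 */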
static int is_eth_imm(const struct sk_buff *skb,
		      struct ipsec_sa_entry *sa_entry)
{
	unsigned int kctx_len;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = sizeof(struct fw_ulptx_wr) +
		 sizeof(struct chcr_ipsec_req) + kctx_len;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (sa_entry->esn)
		hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
			   << 4);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}

static unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
				      struct ipsec_sa_entry *sa_entry,
				      bool *immediate)
{
	unsigned int kctx_len;
	unsigned int flits;
	int aadivlen;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = is_eth_imm(skb, sa_entry);
	aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
						16) : 0;
	aadivlen <<= 4;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen) {
		*immediate = true;
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
	}

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits += (sizeof(struct fw_ulptx_wr) +
		  sizeof(struct chcr_ipsec_req) +
		  kctx_len +
		  sizeof(struct cpl_tx_pkt_core) +
		  aadivlen) / sizeof(__be64);
	return flits;
}

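/*
 * copy_esn_pktxt - write the AAD/IV block the hardware consumes for
 * extended-sequence-number SAs: the SPI, the full 64-bit sequence number
 * (low 32 bits from the ESP header, high 32 bits from the offload state)
 * and the 8-byte explicit IV, padded out to a 16-byte multiple.
 */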
static void *copy_esn_pktxt(struct sk_buff *skb,
			    struct net_device *dev,
			    void *pos,
			    struct ipsec_sa_entry *sa_entry)
{
	struct chcr_ipsec_aadiv *aadiv;
	struct ulptx_idata *sc_imm;
	struct ip_esp_hdr *esphdr;
	struct xfrm_offload *xo;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	__be64 seqno;
	u32 qidx;
	u32 seqlo;
	u8 *iv;
	int eoq;
	int len;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	if (!eoq)
		pos = q->q.desc;

	len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
	memset(pos, 0, len);
	aadiv = (struct chcr_ipsec_aadiv *)pos;
	esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
	xo = xfrm_offload(skb);

	aadiv->spi = esphdr->spi;
	/* Low 32 bits of the sequence number come from the ESP header,
	 * the high 32 bits from the offload state.
	 */
	seqlo = ntohl(esphdr->seq_no);
	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
	memcpy(aadiv->seq_no, &seqno, 8);
	memcpy(aadiv->iv, iv, 8);

	if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
		sc_imm = (struct ulptx_idata *)(pos +
			 (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
				       sizeof(__be64)) << 3));
		sc_imm->cmd_more = FILL_CMD_MORE(0);
		sc_imm->len = cpu_to_be32(skb->len);
	}
	pos += len;
	return pos;
}

static void *copy_cpltx_pktxt(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      struct ipsec_sa_entry *sa_entry)
{
	struct cpl_tx_pkt_core *cpl;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	u32 ctrl0, qidx;
	u64 cntrl = 0;
	int left;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	left = (void *)q->q.stat - pos;
	if (!left)
		pos = q->q.desc;

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);
	/* Copy ESN info for HW */
	if (sa_entry->esn)
		pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
	return pos;
}

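/*
 * copy_key_cpltx_pktxt - copy the key context (header, salt and key
 * material) into the descriptor ring, wrapping back to the start of the
 * ring if the key straddles the end of the queue, then append the
 * CPL_TX_PKT_XT message.
 */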
static void *copy_key_cpltx_pktxt(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  struct ipsec_sa_entry *sa_entry)
{
	struct _key_ctx *key_ctx;
	int left, eoq, key_len;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	unsigned int qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
	key_len = sa_entry->kctx_len;

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	left = eoq;
	if (!eoq) {
		pos = q->q.desc;
		left = 64 * q->q.size;	/* Tx descriptors are 64 bytes */
	}

	/* Copy the Key context header */
	key_ctx = (struct _key_ctx *)pos;
	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
	pos += sizeof(struct _key_ctx);
	left -= sizeof(struct _key_ctx);

	if (likely(key_len <= left)) {
		memcpy(key_ctx->key, sa_entry->key, key_len);
		pos += key_len;
	} else {
		/* Key straddles the end of the ring: wrap to the start */
		memcpy(pos, sa_entry->key, left);
		memcpy(q->q.desc, sa_entry->key + left,
		       key_len - left);
		pos = (u8 *)q->q.desc + (key_len - left);
	}
	/* Copy CPL TX PKT XT */
	pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}

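/*
 * ch_ipsec_crypto_wreq - build the crypto work request in front of the
 * packet data: FW_ULPTX_WR header, ULP_TXPKT command, an immediate-data
 * sub-command, a CPL_TX_SEC_PDU describing the AAD/IV/cipher offsets,
 * then the key context and CPL_TX_PKT_XT (via copy_key_cpltx_pktxt()).
 */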
static void *ch_ipsec_crypto_wreq(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  int credits,
				  struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	bool immediate = false;
	u16 immdatalen = 0;
	unsigned int flits;
	u32 ivinoffset;
	u32 aadstart;
	u32 aadstop;
	u32 ciphstart;
	u16 sc_more = 0;
	u32 ivdrop = 0;
	u32 esnlen = 0;
	u32 wr_mid;
	u16 ndesc;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = DIV_ROUND_UP(flits, 2);
	if (sa_entry->esn)
		ivdrop = 1;

	if (immediate)
		immdatalen = skb->len;

	if (sa_entry->esn) {
		esnlen = sizeof(struct chcr_ipsec_aadiv);
		if (!skb_is_nonlinear(skb))
			sc_more = 1;
	}

	/* WR Header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		if (!q->dbqt)
			wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(ndesc - 1);

	/* Sub-command */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 esnlen +
					 (esnlen ? 0 : immdatalen));

	/* CPL_SEC_PDU */
	ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
				     (skb_transport_offset(skb) +
				      sizeof(struct ip_esp_hdr) + 1);
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
				CPL_TX_SEC_PDU_CPLLEN_V(2) |
				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
				CPL_TX_SEC_PDU_IVINSRTOFST_V(ivinoffset));

	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
	aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
	aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
				  (skb_transport_offset(skb) +
				   sizeof(struct ip_esp_hdr));
	ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
		    GCM_ESP_IV_SIZE + 1;
	ciphstart += sa_entry->esn ? esnlen : 0;

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
							aadstart,
							aadstop,
							ciphstart, 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
					sa_entry->authsize,
					sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								 0, ivdrop, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static unsigned int flits_to_desc(unsigned int n)
{
	WARN_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}

static unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

/*
 * ch_ipsec_xmit called from ULD Tx handler
 */
static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	unsigned int last_desc, ndesc, flits = 0;
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	struct tx_sw_desc *sgl_sdesc;
	int qidx, left, credits;
	bool immediate = false;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	struct sec_path *sp;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

	sp = skb_sec_path(skb);
	if (sp->len != 1) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;
	/* Setup IPSec CPL */
	pos = (void *)ch_ipsec_crypto_wreq(skb, dev, (void *)pos,
					   credits, sa_entry);
	/* If the WR wrapped around the end of the ring, rebase @end so
	 * the SGL writer sees addresses inside the ring.
	 */
	if (before > (u64 *)pos) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}

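/*
 * Module init/exit: register with cxgb4 as the IPsec inline-crypto ULD.
 * Per-adapter contexts are added to uld_ctx_list as adapters come up and
 * are freed here on unload.
 */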
static int __init ch_ipsec_init(void)
{
	cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);

	return 0;
}

static void __exit ch_ipsec_exit(void)
{
	struct ipsec_uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
		adap = pci_get_drvdata(u_ctx->lldi.pdev);
		atomic_set(&adap->ch_ipsec_stats.ipsec_cnt, 0);
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_IPSEC);
}

module_init(ch_ipsec_init);
module_exit(ch_ipsec_exit);

MODULE_DESCRIPTION("Crypto IPSEC for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(CHIPSEC_DRV_VERSION);