/*******************************************************************************
 *
 * Intel 10 Gigabit PCI Express Linux driver
 * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * Linux NICS <linux.nics@intel.com>
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}
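
/* A note on key byte order in ixgbe_ipsec_set_tx_sa() above and the Rx
 * equivalent below: the AES key arrives from the stack as a big-endian
 * byte stream.  Loading it into u32s on a little-endian host and then
 * writing word i as cpu_to_be32(key[3 - i]) puts the first four key
 * bytes into KEY(3) and the last four into KEY(0), so the device
 * apparently expects the key most-significant word first.  (This is
 * inferred from the write pattern itself, not from a datasheet.)
 */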

/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}

/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;
}

/**
 * ixgbe_ipsec_stop_data
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	IXGBE_WRITE_FLUSH(hw);

	/* If the tx fifo doesn't have link, but still has data,
	 * we can't clear the tx sec block.
	 * Set the MAC loopback before block clear.
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!t_rdy && !r_rdy && limit--);

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}

/**
 * ixgbe_ipsec_stop_engine
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}
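
/* The security engine is enabled lazily: ixgbe_ipsec_add_sa() starts it
 * when the first SA is offloaded, and ixgbe_ipsec_del_sa() stops it
 * again when the last SA is removed, since the engine costs power even
 * when idle (see the NOTE below).
 */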

/**
 * ixgbe_ipsec_start_engine
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);

		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	rcu_read_unlock();
	return ret;
}
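
/* Key blob layout for rfc4106(gcm(aes)) as parsed below: 16 bytes of
 * AES key followed by 4 bytes of salt, i.e. alg_key_len == 160 bits.
 * A hypothetical userspace command that creates such an offloaded SA
 * (addresses, SPI, and key bytes here are illustrative only) might be:
 *
 *   ip xfrm state add src 192.168.10.1 dst 192.168.10.2 \
 *      proto esp spi 0x42 reqid 0x42 mode transport \
 *      aead "rfc4106(gcm(aes))" \
 *      0x6162636465666768696a6b6c6d6e6f7071727374 128 \
 *      offload dev eth0 dir in
 */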

/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (xs->aead) {
		key_data = &xs->aead->alg_key[0];
		key_len = xs->aead->alg_key_len;
		alg_name = xs->aead->alg_name;
	} else {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}

/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto == IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto == IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}
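
/* The offload_handle assigned above does double duty as the SA's
 * location: Rx handles are biased by IXGBE_IPSEC_BASE_RX_INDEX and Tx
 * handles by IXGBE_IPSEC_BASE_TX_INDEX, so the delete and Tx fast-path
 * code below can recover the SA table index with a simple subtraction.
 */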

/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}
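
/* Note that a false return from ixgbe_ipsec_offload_ok() is not fatal
 * to the packet: the xfrm stack simply falls back to software crypto
 * for that frame, so these checks only gate the offload itself.
 */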

/**
 * ixgbe_ipsec_free - called by xfrm garbage collection
 * @xs: pointer to transformer state struct
 *
 * We don't have any garbage to collect, so we shouldn't bother
 * implementing this function, but the XFRM code doesn't check for
 * existence before calling the API callback.
 **/
static void ixgbe_ipsec_free(struct xfrm_state *xs)
{
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
	.xdo_dev_state_free = ixgbe_ipsec_free,
};

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	itd->flags = 0;
	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
		itd->trailer_len = xs->props.trailer_len;
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data.
	 * We can assume there is no VLAN header in the way: the hardware
	 * would not have flagged the packet as IPsec, and in any case the
	 * VLAN device does not yet support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec;
	size_t size;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
	adapter->netdev->features |= NETIF_F_HW_ESP;
	adapter->netdev->hw_enc_features |= NETIF_F_HW_ESP;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
err1:
	kfree(ipsec);
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}

/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}