// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";

static void ixgbe_ipsec_del_sa(struct xfrm_state *xs);

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

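	/* The key words are written in reverse order, and each word is
	 * byte-swapped on its way into the KEY registers; the salt gets
	 * the same swap.
	 */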
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
			(__force u32)cpu_to_le32((__force u32)spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
				(__force u32)cpu_to_le32((__force u32)addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}

/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}
}

/**
 * ixgbe_ipsec_stop_data
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* If both Tx and Rx are ready there are no packets
	 * that we need to flush so the loopback configuration
	 * below is not necessary.
	 */
	t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_RDY;
	r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_RDY;
	if (t_rdy && r_rdy)
		return;

	/* If the tx fifo doesn't have link, but still has data,
	 * we can't clear the tx sec block. Set the MAC loopback
	 * before block clear
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!(t_rdy && r_rdy) && limit--);

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}

/**
 * ixgbe_ipsec_stop_engine
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_start_engine
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 *
 * Reload the HW tables from the SW tables after they've been bashed
 * by a chip reset.
 *
 * Any VF entries are removed from the SW and HW tables since either
 * (a) the VF also gets reset on PF reset and will ask again for the
 * offloads, or (b) the VF has been removed by a change in the num_vfs.
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *r = &ipsec->rx_tbl[i];
		struct tx_sa *t = &ipsec->tx_tbl[i];

		if (r->used) {
			if (r->mode & IXGBE_RXTXMOD_VF)
				ixgbe_ipsec_del_sa(r->xs);
			else
				ixgbe_ipsec_set_rx_sa(hw, i, r->xs->id.spi,
						      r->key, r->salt,
						      r->mode, r->iptbl_ind);
		}

		if (t->used) {
			if (t->mode & IXGBE_RXTXMOD_VF)
				ixgbe_ipsec_del_sa(t->xs);
			else
				ixgbe_ipsec_set_tx_sa(hw, i, t->key, t->salt);
		}
	}

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}
}

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

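	/* Entries are hashed on the SPI alone, so after the hash lookup
	 * also verify the address and protocol before claiming a match.
	 */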
	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (rsa->mode & IXGBE_RXTXMOD_VF)
			continue;
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		     (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				      sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables. The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
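	/* For example, a 160-bit alg_key for rfc4106(gcm(aes)) lays out as
	 *	key_data[0..15]  - the 16 byte AES-GCM key
	 *	key_data[16..19] - the 4 byte salt, i.e. ((u32 *)key_data)[4]
	 * while a 128-bit alg_key carries the key but no salt.
	 */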
	if (key_len == IXGBE_IPSEC_KEY_BITS) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != (IXGBE_IPSEC_KEY_BITS - (sizeof(*mysalt) * 8))) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}

/**
 * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mfval, manc, reg;
	int num_filters = 4;
	bool manc_ipv4;
	u32 bmcipval;
	int i, j;

#define MANC_EN_IPV4_FILTER      BIT(24)
#define MFVAL_IPV4_FILTER_SHIFT  16
#define MFVAL_IPV6_FILTER_SHIFT  24
#define MIPAF_ARR(_m, _n)        (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))

#define IXGBE_BMCIP(_n)          (0x5050 + ((_n) * 4))
#define IXGBE_BMCIPVAL           0x5060
#define BMCIP_V4                 0x2
#define BMCIP_V6                 0x3
#define BMCIP_MASK               0x3

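	/* The MIPAF block holds four IPv6 filter addresses of four words
	 * each; when IPv4 filtering is enabled, the four IPv4 filter
	 * addresses share the last IPv6 slot, i.e. MIPAF_ARR(3, i).
	 */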
	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER);
	mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL);
	bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL);

	if (xs->props.family == AF_INET) {
		/* are there any IPv4 filters to check? */
		if (manc_ipv4) {
			/* the 4 ipv4 filters are all in MIPAF(3, i) */
			for (i = 0; i < num_filters; i++) {
				if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i)))
					continue;

				reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
				if (reg == xs->id.daddr.a4)
					return 1;
			}
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
			reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
			if (reg == xs->id.daddr.a4)
				return 1;
		}

	} else {
		/* if there are ipv4 filters, they are in the last ipv6 slot */
		if (manc_ipv4)
			num_filters = 3;

		for (i = 0; i < num_filters; i++) {
			if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i)))
				continue;

			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}
	}

	return 0;
}

/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (ixgbe_ipsec_check_mgmt_ip(xs)) {
		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

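		/* An IPv4 address uses only the last of the four ipaddr
		 * words, matching the layout ixgbe_ipsec_set_rx_ip()
		 * writes into the IPSRXIPADDR registers.
		 */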
		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table. If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr. If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);

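		/* The offload_handle encodes both the table and the slot:
		 * Rx handles are biased by IXGBE_IPSEC_BASE_RX_INDEX and
		 * Tx handles by IXGBE_IPSEC_BASE_TX_INDEX, so the handle
		 * alone can later be mapped back to its table entry.
		 */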
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		if (adapter->num_vfs)
			return -EOPNOTSUPP;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}

/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__force __be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

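/* These callbacks are wired into the netdev in ixgbe_init_ipsec_offload()
 * below.  As a rough sketch (the addresses, SPI, and key are illustrative,
 * and the exact syntax may vary with the iproute2 version), an
 * administrator requests a per-SA offload from userspace with something
 * like
 *
 *	ip xfrm state add src 192.168.1.1 dst 192.168.1.2 proto esp \
 *		spi 0x42 mode transport aead 'rfc4106(gcm(aes))' $KEY 128 \
 *		offload dev eth0 dir in
 *
 * which reaches ixgbe_ipsec_add_sa() through the xdo_dev_state_add hook.
 */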
static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};

/**
 * ixgbe_ipsec_vf_clear - clear the tables of data for a VF
 * @adapter: board private structure
 * @vf: VF id to be removed
 **/
void ixgbe_ipsec_vf_clear(struct ixgbe_adapter *adapter, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	int i;

	/* search rx sa table */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_rx_sa; i++) {
		if (!ipsec->rx_tbl[i].used)
			continue;
		if (ipsec->rx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
		    ipsec->rx_tbl[i].vf == vf)
			ixgbe_ipsec_del_sa(ipsec->rx_tbl[i].xs);
	}

	/* search tx sa table */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT && ipsec->num_tx_sa; i++) {
		if (!ipsec->tx_tbl[i].used)
			continue;
		if (ipsec->tx_tbl[i].mode & IXGBE_RXTXMOD_VF &&
		    ipsec->tx_tbl[i].vf == vf)
			ixgbe_ipsec_del_sa(ipsec->tx_tbl[i].xs);
	}
}

/**
 * ixgbe_ipsec_vf_add_sa - translate VF request to SA add
 * @adapter: board private structure
 * @msgbuf: The message buffer
 * @vf: the VF index
 *
 * Make up a new xs and algorithm info from the data sent by the VF.
 * We only need to sketch in just enough to set up the HW offload.
 * Put the resulting offload_handle into the return message to the VF.
 *
 * Returns 0 or error value
 **/
int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_algo_desc *algo;
	struct sa_mbx_msg *sam;
	struct xfrm_state *xs;
	size_t aead_len;
	u16 sa_idx;
	u32 pfsa;
	int err;

	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
	if (!adapter->vfinfo[vf].trusted ||
	    !(adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)) {
		e_warn(drv, "VF %d attempted to add an IPsec SA\n", vf);
		err = -EACCES;
		goto err_out;
	}

	/* Tx IPsec offload doesn't seem to work on this
	 * device, so block these requests for now.
	 */
	if (!(sam->flags & XFRM_OFFLOAD_INBOUND)) {
		err = -EOPNOTSUPP;
		goto err_out;
	}

	xs = kzalloc(sizeof(*xs), GFP_KERNEL);
	if (unlikely(!xs)) {
		err = -ENOMEM;
		goto err_out;
	}

	xs->xso.flags = sam->flags;
	xs->id.spi = sam->spi;
	xs->id.proto = sam->proto;
	xs->props.family = sam->family;
	if (xs->props.family == AF_INET6)
		memcpy(&xs->id.daddr.a6, sam->addr, sizeof(xs->id.daddr.a6));
	else
		memcpy(&xs->id.daddr.a4, sam->addr, sizeof(xs->id.daddr.a4));
	xs->xso.dev = adapter->netdev;

	algo = xfrm_aead_get_byname(aes_gcm_name, IXGBE_IPSEC_AUTH_BITS, 1);
	if (unlikely(!algo)) {
		err = -ENOENT;
		goto err_xs;
	}

	aead_len = sizeof(*xs->aead) + IXGBE_IPSEC_KEY_BITS / 8;
	xs->aead = kzalloc(aead_len, GFP_KERNEL);
	if (unlikely(!xs->aead)) {
		err = -ENOMEM;
		goto err_xs;
	}

	xs->props.ealgo = algo->desc.sadb_alg_id;
	xs->geniv = algo->uinfo.aead.geniv;
	xs->aead->alg_icv_len = IXGBE_IPSEC_AUTH_BITS;
	xs->aead->alg_key_len = IXGBE_IPSEC_KEY_BITS;
	memcpy(xs->aead->alg_key, sam->key, sizeof(sam->key));
	memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));

	/* set up the HW offload */
	err = ixgbe_ipsec_add_sa(xs);
	if (err)
		goto err_aead;

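	/* The returned handle tells us which table the SA landed in:
	 * values below IXGBE_IPSEC_BASE_TX_INDEX are Rx slots, the rest
	 * are Tx slots.  Tag the entry with its owning VF.
	 */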
	pfsa = xs->xso.offload_handle;
	if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
		sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
		ipsec->rx_tbl[sa_idx].vf = vf;
		ipsec->rx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
	} else {
		sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
		ipsec->tx_tbl[sa_idx].vf = vf;
		ipsec->tx_tbl[sa_idx].mode |= IXGBE_RXTXMOD_VF;
	}

	msgbuf[1] = xs->xso.offload_handle;

	return 0;

err_aead:
	memset(xs->aead, 0, sizeof(*xs->aead));
	kfree(xs->aead);
err_xs:
	memset(xs, 0, sizeof(*xs));
	kfree(xs);
err_out:
	msgbuf[1] = err;
	return err;
}

/**
 * ixgbe_ipsec_vf_del_sa - translate VF request to SA delete
 * @adapter: board private structure
 * @msgbuf: The message buffer
 * @vf: the VF index
 *
 * Given the offload_handle sent by the VF, look for the related SA table
 * entry and use its xs field to call for a delete of the SA.
 *
 * Note: We silently ignore requests to delete entries that are already
 *       set to unused because when a VF is set to "DOWN", the PF first
 *       gets a reset and clears all the VF's entries; then the VF's
 *       XFRM stack sends individual deletes for each entry, which the
 *       reset already removed. In the future it might be good to try to
 *       optimize this so not so many unnecessary delete messages are sent.
 *
 * Returns 0 or error value
 **/
int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	u32 pfsa = msgbuf[1];
	u16 sa_idx;

	if (!adapter->vfinfo[vf].trusted) {
		e_err(drv, "vf %d attempted to delete an SA\n", vf);
		return -EPERM;
	}

	if (pfsa < IXGBE_IPSEC_BASE_TX_INDEX) {
		struct rx_sa *rsa;

		sa_idx = pfsa - IXGBE_IPSEC_BASE_RX_INDEX;
		if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
			e_err(drv, "vf %d SA index %d out of range\n",
			      vf, sa_idx);
			return -EINVAL;
		}

		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used)
			return 0;

		if (!(rsa->mode & IXGBE_RXTXMOD_VF) ||
		    rsa->vf != vf) {
			e_err(drv, "vf %d bad Rx SA index %d\n", vf, sa_idx);
			return -ENOENT;
		}

		xs = ipsec->rx_tbl[sa_idx].xs;
	} else {
		struct tx_sa *tsa;

		sa_idx = pfsa - IXGBE_IPSEC_BASE_TX_INDEX;
		if (sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT) {
			e_err(drv, "vf %d SA index %d out of range\n",
			      vf, sa_idx);
			return -EINVAL;
		}

		tsa = &ipsec->tx_tbl[sa_idx];

		if (!tsa->used)
			return 0;

		if (!(tsa->mode & IXGBE_RXTXMOD_VF) ||
		    tsa->vf != vf) {
			e_err(drv, "vf %d bad Tx SA index %d\n", vf, sa_idx);
			return -ENOENT;
		}

		xs = ipsec->tx_tbl[sa_idx].xs;
	}

	ixgbe_ipsec_del_sa(xs);

	/* remove the xs that was made-up in the add request */
	memset(xs, 0, sizeof(*xs));
	kfree(xs);

	return 0;
}

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding. This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
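		/* Worked example: an ESP payload needing 3 pad bytes has a
		 * trailer of 3 + 2 (padlen and proto) + 16 (ICV) = 21 bytes,
		 * while one needing no padding has only 18.
		 */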
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data.
	 * We can assume no VLAN header is in the way because the
	 * hw won't recognize the IPsec packet, and in any case the
	 * VLAN device doesn't currently support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

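	/* Record the offloaded state in the skb's secpath and mark the
	 * crypto as already done so the xfrm stack won't try to decrypt
	 * the payload again in software.
	 */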
	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ipsec *ipsec;
	u32 t_dis, r_dis;
	size_t size;

	if (hw->mac.type == ixgbe_mac_82598EB)
		return;

	/* If there is no support for either Tx or Rx offload
	 * we should not be advertising support for IPsec.
	 */
	t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
		IXGBE_SECTXSTAT_SECTX_OFF_DIS;
	r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
		IXGBE_SECRXSTAT_SECRX_OFF_DIS;
	if (t_dis || r_dis)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

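	/* The tables allocated below are shadow copies of the hardware
	 * tables, kept so ixgbe_ipsec_restore() can rebuild the device
	 * state after a chip reset.
	 */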
1169 */ 1170 if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) { 1171 ip4 = (struct iphdr *)(skb->data + ETH_HLEN); 1172 daddr = &ip4->daddr; 1173 c_hdr = (u8 *)ip4 + ip4->ihl * 4; 1174 } else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) { 1175 ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN); 1176 daddr = &ip6->daddr; 1177 c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr); 1178 } else { 1179 return; 1180 } 1181 1182 switch (pkt_info & ipsec_pkt_types) { 1183 case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH): 1184 spi = ((struct ip_auth_hdr *)c_hdr)->spi; 1185 proto = IPPROTO_AH; 1186 break; 1187 case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP): 1188 spi = ((struct ip_esp_hdr *)c_hdr)->spi; 1189 proto = IPPROTO_ESP; 1190 break; 1191 default: 1192 return; 1193 } 1194 1195 xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4); 1196 if (unlikely(!xs)) 1197 return; 1198 1199 skb->sp = secpath_dup(skb->sp); 1200 if (unlikely(!skb->sp)) 1201 return; 1202 1203 skb->sp->xvec[skb->sp->len++] = xs; 1204 skb->sp->olen++; 1205 xo = xfrm_offload(skb); 1206 xo->flags = CRYPTO_DONE; 1207 xo->status = CRYPTO_SUCCESS; 1208 1209 adapter->rx_ipsec++; 1210 } 1211 1212 /** 1213 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation 1214 * @adapter: board private structure 1215 **/ 1216 void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter) 1217 { 1218 struct ixgbe_hw *hw = &adapter->hw; 1219 struct ixgbe_ipsec *ipsec; 1220 u32 t_dis, r_dis; 1221 size_t size; 1222 1223 if (hw->mac.type == ixgbe_mac_82598EB) 1224 return; 1225 1226 /* If there is no support for either Tx or Rx offload 1227 * we should not be advertising support for IPsec. 1228 */ 1229 t_dis = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) & 1230 IXGBE_SECTXSTAT_SECTX_OFF_DIS; 1231 r_dis = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) & 1232 IXGBE_SECRXSTAT_SECRX_OFF_DIS; 1233 if (t_dis || r_dis) 1234 return; 1235 1236 ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL); 1237 if (!ipsec) 1238 goto err1; 1239 hash_init(ipsec->rx_sa_list); 1240 1241 size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; 1242 ipsec->rx_tbl = kzalloc(size, GFP_KERNEL); 1243 if (!ipsec->rx_tbl) 1244 goto err2; 1245 1246 size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT; 1247 ipsec->tx_tbl = kzalloc(size, GFP_KERNEL); 1248 if (!ipsec->tx_tbl) 1249 goto err2; 1250 1251 size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT; 1252 ipsec->ip_tbl = kzalloc(size, GFP_KERNEL); 1253 if (!ipsec->ip_tbl) 1254 goto err2; 1255 1256 ipsec->num_rx_sa = 0; 1257 ipsec->num_tx_sa = 0; 1258 1259 adapter->ipsec = ipsec; 1260 ixgbe_ipsec_stop_engine(adapter); 1261 ixgbe_ipsec_clear_hw_tables(adapter); 1262 1263 adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops; 1264 1265 return; 1266 1267 err2: 1268 kfree(ipsec->ip_tbl); 1269 kfree(ipsec->rx_tbl); 1270 kfree(ipsec->tx_tbl); 1271 kfree(ipsec); 1272 err1: 1273 netdev_err(adapter->netdev, "Unable to allocate memory for SA tables"); 1274 } 1275 1276 /** 1277 * ixgbe_stop_ipsec_offload - tear down the ipsec offload 1278 * @adapter: board private structure 1279 **/ 1280 void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter) 1281 { 1282 struct ixgbe_ipsec *ipsec = adapter->ipsec; 1283 1284 adapter->ipsec = NULL; 1285 if (ipsec) { 1286 kfree(ipsec->ip_tbl); 1287 kfree(ipsec->rx_tbl); 1288 kfree(ipsec->tx_tbl); 1289 kfree(ipsec); 1290 } 1291 } 1292