1 /*******************************************************************************
2  *
3  * Intel 10 Gigabit PCI Express Linux driver
4  * Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along with
16  * this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * Linux NICS <linux.nics@intel.com>
23  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25  *
26  ******************************************************************************/
27 
28 #include "ixgbe.h"
29 #include <net/xfrm.h>
30 #include <crypto/aead.h>
31 
32 /**
33  * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
34  * @hw: hw specific details
35  * @idx: register index to write
36  * @key: key byte array
37  * @salt: salt bytes
38  **/
39 static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
40 				  u32 key[], u32 salt)
41 {
42 	u32 reg;
43 	int i;
44 
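	/* the hw expects the key in reverse word order, so write
	 * key[3] down through key[0], byteswapping each word
	 */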
45 	for (i = 0; i < 4; i++)
46 		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i), cpu_to_be32(key[3 - i]));
47 	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, cpu_to_be32(salt));
48 	IXGBE_WRITE_FLUSH(hw);
49 
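	/* keep the enable bit, select the SA index, and trigger
	 * the store into the Tx SA table
	 */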
50 	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
51 	reg &= IXGBE_RXTXIDX_IPS_EN;
52 	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
53 	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
54 	IXGBE_WRITE_FLUSH(hw);
55 }
56 
57 /**
58  * ixgbe_ipsec_set_rx_item - set an Rx table item
59  * @hw: hw specific details
60  * @idx: register index to write
61  * @tbl: table selector
62  *
63  * Trigger the device to store into a particular Rx table the
64  * data that has already been loaded into the input register
65  **/
66 static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
67 				    enum ixgbe_ipsec_tbl_sel tbl)
68 {
69 	u32 reg;
70 
71 	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
72 	reg &= IXGBE_RXTXIDX_IPS_EN;
73 	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
74 	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
75 	       IXGBE_RXTXIDX_WRITE;
76 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
77 	IXGBE_WRITE_FLUSH(hw);
78 }
79 
80 /**
81  * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
82  * @hw: hw specific details
83  * @idx: register index to write
84  * @spi: security parameter index
85  * @key: key byte array
86  * @salt: salt bytes
87  * @mode: rx decrypt control bits
88  * @ip_idx: index into IP table for related IP address
89  **/
90 static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
91 				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
92 {
93 	int i;
94 
95 	/* store the SPI (in bigendian) and IPidx */
96 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, cpu_to_le32(spi));
97 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
98 	IXGBE_WRITE_FLUSH(hw);
99 
100 	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);
101 
102 	/* store the key, salt, and mode */
103 	for (i = 0; i < 4; i++)
104 		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i), cpu_to_be32(key[3 - i]));
105 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, cpu_to_be32(salt));
106 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
107 	IXGBE_WRITE_FLUSH(hw);
108 
109 	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
110 }
111 
112 /**
113  * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
114  * @hw: hw specific details
115  * @idx: register index to write
116  * @addr: IP address byte array
117  **/
118 static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
119 {
120 	int i;
121 
122 	/* store the ip address */
123 	for (i = 0; i < 4; i++)
124 		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i), cpu_to_le32(addr[i]));
125 	IXGBE_WRITE_FLUSH(hw);
126 
127 	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
128 }
129 
130 /**
131  * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
132  * @adapter: board private structure
133  **/
134 static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
135 {
136 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
137 	struct ixgbe_hw *hw = &adapter->hw;
138 	u32 buf[4] = {0, 0, 0, 0};
139 	u16 idx;
140 
141 	/* disable Rx and Tx SA lookup */
142 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
143 	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
144 
	/* scrub the tables - split the loops where the smaller IP table ends */
146 	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
147 		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
148 		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
149 		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
150 	}
151 	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
152 		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
153 		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
154 	}
155 
156 	ipsec->num_rx_sa = 0;
157 	ipsec->num_tx_sa = 0;
158 }
159 
160 /**
161  * ixgbe_ipsec_stop_data
162  * @adapter: board private structure
163  **/
164 static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
165 {
166 	struct ixgbe_hw *hw = &adapter->hw;
167 	bool link = adapter->link_up;
168 	u32 t_rdy, r_rdy;
169 	u32 limit;
170 	u32 reg;
171 
172 	/* halt data paths */
173 	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
174 	reg |= IXGBE_SECTXCTRL_TX_DIS;
175 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
176 
177 	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
178 	reg |= IXGBE_SECRXCTRL_RX_DIS;
179 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
180 
181 	IXGBE_WRITE_FLUSH(hw);
182 
	/* If the Tx FIFO has no link but still holds data, we can't
	 * clear the Tx sec block.  Set MAC loopback before the block
	 * clear so the data can drain.
	 */
187 	if (!link) {
188 		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
189 		reg |= IXGBE_MACC_FLU;
190 		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
191 
192 		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
193 		reg |= IXGBE_HLREG0_LPBK;
194 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
195 
196 		IXGBE_WRITE_FLUSH(hw);
197 		mdelay(3);
198 	}
199 
200 	/* wait for the paths to empty */
201 	limit = 20;
202 	do {
203 		mdelay(10);
204 		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
205 			IXGBE_SECTXSTAT_SECTX_RDY;
206 		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
207 			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!(t_rdy && r_rdy) && limit--);
209 
210 	/* undo loopback if we played with it earlier */
211 	if (!link) {
212 		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
213 		reg &= ~IXGBE_MACC_FLU;
214 		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);
215 
216 		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
217 		reg &= ~IXGBE_HLREG0_LPBK;
218 		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);
219 
220 		IXGBE_WRITE_FLUSH(hw);
221 	}
222 }
223 
224 /**
225  * ixgbe_ipsec_stop_engine
226  * @adapter: board private structure
227  **/
228 static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
229 {
230 	struct ixgbe_hw *hw = &adapter->hw;
231 	u32 reg;
232 
233 	ixgbe_ipsec_stop_data(adapter);
234 
235 	/* disable Rx and Tx SA lookup */
236 	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
237 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
238 
239 	/* disable the Rx and Tx engines and full packet store-n-forward */
240 	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
241 	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
242 	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
243 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);
244 
245 	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
246 	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
247 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);
248 
249 	/* restore the "tx security buffer almost full threshold" to 0x250 */
250 	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);
251 
252 	/* Set minimum IFG between packets back to the default 0x1 */
253 	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
254 	reg = (reg & 0xfffffff0) | 0x1;
255 	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
256 
257 	/* final set for normal (no ipsec offload) processing */
258 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
259 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);
260 
261 	IXGBE_WRITE_FLUSH(hw);
262 }
263 
264 /**
265  * ixgbe_ipsec_start_engine
266  * @adapter: board private structure
267  *
268  * NOTE: this increases power consumption whether being used or not
269  **/
270 static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
271 {
272 	struct ixgbe_hw *hw = &adapter->hw;
273 	u32 reg;
274 
275 	ixgbe_ipsec_stop_data(adapter);
276 
277 	/* Set minimum IFG between packets to 3 */
278 	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
279 	reg = (reg & 0xfffffff0) | 0x3;
280 	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
281 
	/* Set "tx security buffer almost full threshold" to 0x15 so that
	 * the almost-full indication is generated only after the buffer
	 * contains at least an entire jumbo packet.
	 */
286 	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
287 	reg = (reg & 0xfffffc00) | 0x15;
288 	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);
289 
290 	/* restart the data paths by clearing the DISABLE bits */
291 	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
292 	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);
293 
294 	/* enable Rx and Tx SA lookup */
295 	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
296 	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);
297 
298 	IXGBE_WRITE_FLUSH(hw);
299 }
300 
301 /**
302  * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
303  * @adapter: board private structure
304  **/
305 void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
306 {
307 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
308 	struct ixgbe_hw *hw = &adapter->hw;
309 	int i;
310 
311 	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
312 		return;
313 
314 	/* clean up and restart the engine */
315 	ixgbe_ipsec_stop_engine(adapter);
316 	ixgbe_ipsec_clear_hw_tables(adapter);
317 	ixgbe_ipsec_start_engine(adapter);
318 
319 	/* reload the IP addrs */
320 	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
321 		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];
322 
323 		if (ipsa->used)
324 			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
325 	}
326 
327 	/* reload the Rx and Tx keys */
328 	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
329 		struct rx_sa *rsa = &ipsec->rx_tbl[i];
330 		struct tx_sa *tsa = &ipsec->tx_tbl[i];
331 
332 		if (rsa->used)
333 			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
334 					      rsa->key, rsa->salt,
335 					      rsa->mode, rsa->iptbl_ind);
336 
337 		if (tsa->used)
338 			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
339 	}
340 }
341 
342 /**
343  * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
344  * @ipsec: pointer to ipsec struct
345  * @rxtable: true if we need to look in the Rx table
346  *
347  * Returns the first unused index in either the Rx or Tx SA table
348  **/
349 static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
350 {
351 	u32 i;
352 
353 	if (rxtable) {
354 		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
355 			return -ENOSPC;
356 
357 		/* search rx sa table */
358 		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
359 			if (!ipsec->rx_tbl[i].used)
360 				return i;
361 		}
362 	} else {
363 		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
364 			return -ENOSPC;
365 
366 		/* search tx sa table */
367 		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
368 			if (!ipsec->tx_tbl[i].used)
369 				return i;
370 		}
371 	}
372 
373 	return -ENOSPC;
374 }
375 
376 /**
377  * ixgbe_ipsec_find_rx_state - find the state that matches
378  * @ipsec: pointer to ipsec struct
379  * @daddr: inbound address to match
380  * @proto: protocol to match
381  * @spi: SPI to match
382  * @ip4: true if using an ipv4 address
383  *
384  * Returns a pointer to the matching SA state information
385  **/
386 static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
387 						    __be32 *daddr, u8 proto,
388 						    __be32 spi, bool ip4)
389 {
390 	struct rx_sa *rsa;
391 	struct xfrm_state *ret = NULL;
392 
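	/* the rx_sa_list hash is keyed only on the SPI, so also match
	 * the destination address and protocol before accepting an entry
	 */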
393 	rcu_read_lock();
394 	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist, spi)
395 		if (spi == rsa->xs->id.spi &&
396 		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
397 		      (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
398 				       sizeof(rsa->xs->id.daddr.a6)))) &&
399 		    proto == rsa->xs->id.proto) {
400 			ret = rsa->xs;
401 			xfrm_state_hold(ret);
402 			break;
403 		}
404 	rcu_read_unlock();
405 	return ret;
406 }
407 
408 /**
409  * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
410  * @xs: pointer to xfrm_state struct
411  * @mykey: pointer to key array to populate
412  * @mysalt: pointer to salt value to populate
413  *
414  * This copies the protocol keys and salt to our own data tables.  The
415  * 82599 family only supports the one algorithm.
416  **/
417 static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
418 					u32 *mykey, u32 *mysalt)
419 {
420 	struct net_device *dev = xs->xso.dev;
421 	unsigned char *key_data;
422 	char *alg_name = NULL;
423 	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
424 	int key_len;
425 
426 	if (!xs->aead) {
427 		netdev_err(dev, "Unsupported IPsec algorithm\n");
428 		return -EINVAL;
429 	}
430 
431 	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
432 		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
433 			   IXGBE_IPSEC_AUTH_BITS);
434 		return -EINVAL;
435 	}
436 
437 	key_data = &xs->aead->alg_key[0];
438 	key_len = xs->aead->alg_key_len;
439 	alg_name = xs->aead->alg_name;
440 
441 	if (strcmp(alg_name, aes_gcm_name)) {
442 		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
443 			   aes_gcm_name);
444 		return -EINVAL;
445 	}
446 
	/* The key bytes come down in a big-endian array of bytes, so
	 * we don't need to do any byteswapping.
	 * A key_len of 160 bits covers the 16 byte key plus the
	 * 4 byte salt.
	 */
451 	if (key_len == 160) {
452 		*mysalt = ((u32 *)key_data)[4];
453 	} else if (key_len != 128) {
454 		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
455 		return -EINVAL;
456 	} else {
457 		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
458 		*mysalt = 0;
459 	}
460 	memcpy(mykey, key_data, 16);
461 
462 	return 0;
463 }
464 
465 /**
466  * ixgbe_ipsec_add_sa - program device with a security association
467  * @xs: pointer to transformer state struct
468  **/
469 static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
470 {
471 	struct net_device *dev = xs->xso.dev;
472 	struct ixgbe_adapter *adapter = netdev_priv(dev);
473 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
474 	struct ixgbe_hw *hw = &adapter->hw;
475 	int checked, match, first;
476 	u16 sa_idx;
477 	int ret;
478 	int i;
479 
480 	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
481 		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
482 			   xs->id.proto);
483 		return -EINVAL;
484 	}
485 
486 	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
487 		struct rx_sa rsa;
488 
489 		if (xs->calg) {
490 			netdev_err(dev, "Compression offload not supported\n");
491 			return -EINVAL;
492 		}
493 
494 		/* find the first unused index */
495 		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
496 		if (ret < 0) {
497 			netdev_err(dev, "No space for SA in Rx table!\n");
498 			return ret;
499 		}
500 		sa_idx = (u16)ret;
501 
502 		memset(&rsa, 0, sizeof(rsa));
503 		rsa.used = true;
504 		rsa.xs = xs;
505 
506 		if (rsa.xs->id.proto & IPPROTO_ESP)
507 			rsa.decrypt = xs->ealg || xs->aead;
508 
509 		/* get the key and salt */
510 		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
511 		if (ret) {
512 			netdev_err(dev, "Failed to get key data for Rx SA table\n");
513 			return ret;
514 		}
515 
516 		/* get ip for rx sa table */
517 		if (xs->props.family == AF_INET6)
518 			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
519 		else
520 			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);
521 
522 		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
523 		 * check for a matching IP addr entry in the table.  If the addr
524 		 * already exists, use it; else find an unused slot and add the
525 		 * addr.  If one does not exist and there are no unused table
526 		 * entries, fail the request.
527 		 */
528 
529 		/* Find an existing match or first not used, and stop looking
530 		 * after we've checked all we know we have.
531 		 */
532 		checked = 0;
533 		match = -1;
534 		first = -1;
535 		for (i = 0;
536 		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
537 		     (checked < ipsec->num_rx_sa || first < 0);
538 		     i++) {
539 			if (ipsec->ip_tbl[i].used) {
540 				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
541 					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
542 					match = i;
543 					break;
544 				}
545 				checked++;
546 			} else if (first < 0) {
547 				first = i;  /* track the first empty seen */
548 			}
549 		}
550 
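		/* with no SAs in use the IP table is empty, so use slot 0 */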
551 		if (ipsec->num_rx_sa == 0)
552 			first = 0;
553 
554 		if (match >= 0) {
555 			/* addrs are the same, we should use this one */
556 			rsa.iptbl_ind = match;
557 			ipsec->ip_tbl[match].ref_cnt++;
558 
559 		} else if (first >= 0) {
560 			/* no matches, but here's an empty slot */
561 			rsa.iptbl_ind = first;
562 
563 			memcpy(ipsec->ip_tbl[first].ipaddr,
564 			       rsa.ipaddr, sizeof(rsa.ipaddr));
565 			ipsec->ip_tbl[first].ref_cnt = 1;
566 			ipsec->ip_tbl[first].used = true;
567 
568 			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);
569 
570 		} else {
571 			/* no match and no empty slot */
572 			netdev_err(dev, "No space for SA in Rx IP SA table\n");
573 			memset(&rsa, 0, sizeof(rsa));
574 			return -ENOSPC;
575 		}
576 
577 		rsa.mode = IXGBE_RXMOD_VALID;
578 		if (rsa.xs->id.proto & IPPROTO_ESP)
579 			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
580 		if (rsa.decrypt)
581 			rsa.mode |= IXGBE_RXMOD_DECRYPT;
582 		if (rsa.xs->props.family == AF_INET6)
583 			rsa.mode |= IXGBE_RXMOD_IPV6;
584 
585 		/* the preparations worked, so save the info */
586 		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));
587 
588 		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
589 				      rsa.salt, rsa.mode, rsa.iptbl_ind);
590 		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;
591 
592 		ipsec->num_rx_sa++;
593 
594 		/* hash the new entry for faster search in Rx path */
595 		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
596 			     rsa.xs->id.spi);
597 	} else {
598 		struct tx_sa tsa;
599 
600 		/* find the first unused index */
601 		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
602 		if (ret < 0) {
603 			netdev_err(dev, "No space for SA in Tx table\n");
604 			return ret;
605 		}
606 		sa_idx = (u16)ret;
607 
608 		memset(&tsa, 0, sizeof(tsa));
609 		tsa.used = true;
610 		tsa.xs = xs;
611 
612 		if (xs->id.proto & IPPROTO_ESP)
613 			tsa.encrypt = xs->ealg || xs->aead;
614 
615 		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
616 		if (ret) {
617 			netdev_err(dev, "Failed to get key data for Tx SA table\n");
618 			memset(&tsa, 0, sizeof(tsa));
619 			return ret;
620 		}
621 
622 		/* the preparations worked, so save the info */
623 		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));
624 
625 		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);
626 
627 		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;
628 
629 		ipsec->num_tx_sa++;
630 	}
631 
632 	/* enable the engine if not already warmed up */
633 	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
634 		ixgbe_ipsec_start_engine(adapter);
635 		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
636 	}
637 
638 	return 0;
639 }
640 
641 /**
642  * ixgbe_ipsec_del_sa - clear out this specific SA
643  * @xs: pointer to transformer state struct
644  **/
645 static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
646 {
647 	struct net_device *dev = xs->xso.dev;
648 	struct ixgbe_adapter *adapter = netdev_priv(dev);
649 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
650 	struct ixgbe_hw *hw = &adapter->hw;
651 	u32 zerobuf[4] = {0, 0, 0, 0};
652 	u16 sa_idx;
653 
654 	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
655 		struct rx_sa *rsa;
656 		u8 ipi;
657 
658 		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
659 		rsa = &ipsec->rx_tbl[sa_idx];
660 
661 		if (!rsa->used) {
662 			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
663 				   sa_idx, xs->xso.offload_handle);
664 			return;
665 		}
666 
667 		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
668 		hash_del_rcu(&rsa->hlist);
669 
670 		/* if the IP table entry is referenced by only this SA,
671 		 * i.e. ref_cnt is only 1, clear the IP table entry as well
672 		 */
673 		ipi = rsa->iptbl_ind;
674 		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
675 			ipsec->ip_tbl[ipi].ref_cnt--;
676 
677 			if (!ipsec->ip_tbl[ipi].ref_cnt) {
678 				memset(&ipsec->ip_tbl[ipi], 0,
679 				       sizeof(struct rx_ip_sa));
680 				ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
681 			}
682 		}
683 
684 		memset(rsa, 0, sizeof(struct rx_sa));
685 		ipsec->num_rx_sa--;
686 	} else {
687 		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
688 
689 		if (!ipsec->tx_tbl[sa_idx].used) {
690 			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
691 				   sa_idx, xs->xso.offload_handle);
692 			return;
693 		}
694 
695 		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
696 		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
697 		ipsec->num_tx_sa--;
698 	}
699 
700 	/* if there are no SAs left, stop the engine to save energy */
701 	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
702 		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
703 		ixgbe_ipsec_stop_engine(adapter);
704 	}
705 }
706 
707 /**
708  * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
709  * @skb: current data packet
710  * @xs: pointer to transformer state struct
711  **/
712 static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
713 {
714 	if (xs->props.family == AF_INET) {
715 		/* Offload with IPv4 options is not supported yet */
716 		if (ip_hdr(skb)->ihl != 5)
717 			return false;
718 	} else {
		/* Offload with IPv6 extension headers is not supported yet */
720 		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
721 			return false;
722 	}
723 
724 	return true;
725 }
726 
727 static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
728 	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
729 	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
730 	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
731 };
732 
733 /**
734  * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
735  * @tx_ring: outgoing context
736  * @first: current data packet
737  * @itd: ipsec Tx data for later use in building context descriptor
738  **/
739 int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
740 		   struct ixgbe_tx_buffer *first,
741 		   struct ixgbe_ipsec_tx_data *itd)
742 {
743 	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
744 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
745 	struct xfrm_state *xs;
746 	struct tx_sa *tsa;
747 
748 	if (unlikely(!first->skb->sp->len)) {
749 		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
750 			   __func__, first->skb->sp->len);
751 		return 0;
752 	}
753 
754 	xs = xfrm_input_state(first->skb);
755 	if (unlikely(!xs)) {
756 		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
757 			   __func__, xs);
758 		return 0;
759 	}
760 
761 	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
763 		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
764 			   __func__, itd->sa_idx, xs->xso.offload_handle);
765 		return 0;
766 	}
767 
768 	tsa = &ipsec->tx_tbl[itd->sa_idx];
769 	if (unlikely(!tsa->used)) {
770 		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
771 			   __func__, itd->sa_idx);
772 		return 0;
773 	}
774 
775 	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;
776 
777 	if (xs->id.proto == IPPROTO_ESP) {
778 
779 		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
780 			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
781 		if (first->protocol == htons(ETH_P_IP))
782 			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;
783 
784 		/* The actual trailer length is authlen (16 bytes) plus
785 		 * 2 bytes for the proto and the padlen values, plus
786 		 * padlen bytes of padding.  This ends up not the same
787 		 * as the static value found in xs->props.trailer_len (21).
788 		 *
789 		 * ... but if we're doing GSO, don't bother as the stack
790 		 * doesn't add a trailer for those.
791 		 */
792 		if (!skb_is_gso(first->skb)) {
793 			/* The "correct" way to get the auth length would be
794 			 * to use
795 			 *    authlen = crypto_aead_authsize(xs->data);
796 			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
798 			 * and save us a few CPU cycles.
799 			 */
800 			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
801 			struct sk_buff *skb = first->skb;
802 			u8 padlen;
803 			int ret;
804 
805 			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
806 					    &padlen, 1);
807 			if (unlikely(ret))
808 				return 0;
809 			itd->trailer_len = authlen + 2 + padlen;
810 		}
811 	}
812 	if (tsa->encrypt)
813 		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;
814 
815 	return 1;
816 }
817 
818 /**
819  * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
820  * @rx_ring: receiving ring
821  * @rx_desc: receive data descriptor
822  * @skb: current data packet
823  *
824  * Determine if there was an ipsec encapsulation noticed, and if so set up
825  * the resulting status for later in the receive stack.
826  **/
827 void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
828 		    union ixgbe_adv_rx_desc *rx_desc,
829 		    struct sk_buff *skb)
830 {
831 	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
832 	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
833 	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
834 					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
835 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
836 	struct xfrm_offload *xo = NULL;
837 	struct xfrm_state *xs = NULL;
838 	struct ipv6hdr *ip6 = NULL;
839 	struct iphdr *ip4 = NULL;
840 	void *daddr;
841 	__be32 spi;
842 	u8 *c_hdr;
843 	u8 proto;
844 
	/* Find the IP and crypto headers in the data.
	 * We can assume no VLAN header is in the way, because the
	 * hw wouldn't have recognized the IPsec packet with one, and
	 * the vlan device doesn't currently support xfrm offload anyway.
	 */
850 	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
851 		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
852 		daddr = &ip4->daddr;
853 		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
854 	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
855 		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
856 		daddr = &ip6->daddr;
857 		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
858 	} else {
859 		return;
860 	}
861 
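	/* pull the SPI from whichever IPsec header the hw flagged */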
862 	switch (pkt_info & ipsec_pkt_types) {
863 	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
864 		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
865 		proto = IPPROTO_AH;
866 		break;
867 	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
868 		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
869 		proto = IPPROTO_ESP;
870 		break;
871 	default:
872 		return;
873 	}
874 
875 	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
876 	if (unlikely(!xs))
877 		return;
878 
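	/* record the SA in the secpath and flag the crypto as already
	 * done so the stack doesn't try to decrypt the packet again
	 */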
879 	skb->sp = secpath_dup(skb->sp);
880 	if (unlikely(!skb->sp))
881 		return;
882 
883 	skb->sp->xvec[skb->sp->len++] = xs;
884 	skb->sp->olen++;
885 	xo = xfrm_offload(skb);
886 	xo->flags = CRYPTO_DONE;
887 	xo->status = CRYPTO_SUCCESS;
888 
889 	adapter->rx_ipsec++;
890 }
891 
892 /**
893  * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
894  * @adapter: board private structure
895  **/
896 void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
897 {
898 	struct ixgbe_ipsec *ipsec;
899 	size_t size;
900 
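	/* the 82598 does not support IPsec offload */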
901 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
902 		return;
903 
904 	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
905 	if (!ipsec)
906 		goto err1;
907 	hash_init(ipsec->rx_sa_list);
908 
909 	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
910 	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
911 	if (!ipsec->rx_tbl)
912 		goto err2;
913 
914 	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
915 	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
916 	if (!ipsec->tx_tbl)
917 		goto err2;
918 
919 	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
920 	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
921 	if (!ipsec->ip_tbl)
922 		goto err2;
923 
924 	ipsec->num_rx_sa = 0;
925 	ipsec->num_tx_sa = 0;
926 
927 	adapter->ipsec = ipsec;
928 	ixgbe_ipsec_stop_engine(adapter);
929 	ixgbe_ipsec_clear_hw_tables(adapter);
930 
931 	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;
932 
933 #define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
934 				 NETIF_F_HW_ESP_TX_CSUM | \
935 				 NETIF_F_GSO_ESP)
936 
937 	adapter->netdev->features |= IXGBE_ESP_FEATURES;
938 	adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;
939 
940 	return;
941 
942 err2:
943 	kfree(ipsec->ip_tbl);
944 	kfree(ipsec->rx_tbl);
945 	kfree(ipsec->tx_tbl);
946 err1:
947 	kfree(adapter->ipsec);
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
949 }
950 
951 /**
952  * ixgbe_stop_ipsec_offload - tear down the ipsec offload
953  * @adapter: board private structure
954  **/
955 void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
956 {
957 	struct ixgbe_ipsec *ipsec = adapter->ipsec;
958 
959 	adapter->ipsec = NULL;
960 	if (ipsec) {
961 		kfree(ipsec->ip_tbl);
962 		kfree(ipsec->rx_tbl);
963 		kfree(ipsec->tx_tbl);
964 		kfree(ipsec);
965 	}
966 }
967