// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

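	/* write the key words in reverse order, byteswapping each so the
	 * 128-bit key lands in the IPSTXKEY registers the way the
	 * hardware expects it
	 */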
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
			(__force u32)cpu_to_le32((__force u32)spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

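	/* push the staged SPI and IP index into the selected Rx SPI
	 * table slot
	 */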
	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

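	/* the entry is always four words: IPv4 callers pass the address
	 * in addr[3] with the first three words zeroed, IPv6 callers use
	 * all four (see ixgbe_ipsec_add_sa)
	 */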
	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
				(__force u32)cpu_to_le32((__force u32)addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}

/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;
}

/**
 * ixgbe_ipsec_stop_data - stop the ipsec Tx and Rx data paths
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	IXGBE_WRITE_FLUSH(hw);

	/* If there is no link but the Tx FIFO still has data, we can't
	 * clear the Tx sec block.  Set the MAC loopback mode before the
	 * block clear.
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!t_rdy && !r_rdy && limit--);

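	/* the wait is best effort - after roughly 200ms we carry on
	 * regardless of the ready bits
	 */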
	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}

/**
 * ixgbe_ipsec_stop_engine - disable the ipsec engines and SA lookup
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_start_engine - get the ipsec engines and SA lookup running
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);

		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused SA table index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table,
 * or -ENOSPC if the table is full
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information with a
 * reference held, or NULL if no match was found
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

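	/* rx_sa_list is hashed on the SPI value; walk the bucket under
	 * RCU and take a hold on the state before handing it back
	 */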
	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		      (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				       sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * A key_len of 160 bits accounts for the 16 byte key plus the
	 * 4 byte salt.
	 */
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}

/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 *
 * Returns 0 on success, or a negative error code on failure
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

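	/* Rx and Tx SAs live in separate tables; pick one based on the
	 * direction of the offload
	 */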
	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u64)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}

/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

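		/* recover the Rx table index from the offload handle that
		 * was set up in ixgbe_ipsec_add_sa
		 */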
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__force __be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 *
 * Returns 0 on failure (the caller drops the frame) and 1 when the
 * ipsec flags and context data have been set up
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

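	/* the offload_handle saved in ixgbe_ipsec_add_sa encodes the
	 * Tx SA table index
	 */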
	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the ip and crypto headers in the data.
	 * We can assume no VLAN header is in the way, because the hw
	 * wouldn't recognize the IPsec packet in that case and, in any
	 * case, the vlan device doesn't currently support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

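	/* attach the state to the skb's secpath and mark the crypto as
	 * already done so the xfrm stack doesn't try to decrypt it again
	 */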
	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPSec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec;
	size_t size;

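	/* IPsec offload is not available on the 82598 */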
	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

#define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBE_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}

/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}