// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

/**
 * ixgbe_ipsec_set_tx_sa - set the Tx SA registers
 * @hw: hw specific details
 * @idx: register index to write
 * @key: key byte array
 * @salt: salt bytes
 **/
static void ixgbe_ipsec_set_tx_sa(struct ixgbe_hw *hw, u16 idx,
				  u32 key[], u32 salt)
{
	u32 reg;
	int i;

	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSTXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_FLUSH(hw);

	reg = IXGBE_READ_REG(hw, IXGBE_IPSTXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= idx << IXGBE_RXTXIDX_IDX_SHIFT | IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

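/* A minimal usage sketch with hypothetical values: key[] holds the
 * 16 key bytes exactly as delivered by the stack (see
 * ixgbe_ipsec_parse_proto_keys() below), and the loop above hands the
 * words to the hardware in reverse order, byteswapped:
 *
 *	u32 key[4], salt = 0;
 *
 *	memcpy(key, key_data, 16);	// 128-bit AES-GCM key bytes
 *	ixgbe_ipsec_set_tx_sa(hw, sa_idx, key, salt);
 */
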
/**
 * ixgbe_ipsec_set_rx_item - set an Rx table item
 * @hw: hw specific details
 * @idx: register index to write
 * @tbl: table selector
 *
 * Trigger the device to store into a particular Rx table the
 * data that has already been loaded into the input register
 **/
static void ixgbe_ipsec_set_rx_item(struct ixgbe_hw *hw, u16 idx,
				    enum ixgbe_ipsec_tbl_sel tbl)
{
	u32 reg;

	reg = IXGBE_READ_REG(hw, IXGBE_IPSRXIDX);
	reg &= IXGBE_RXTXIDX_IPS_EN;
	reg |= tbl << IXGBE_RXIDX_TBL_SHIFT |
	       idx << IXGBE_RXTXIDX_IDX_SHIFT |
	       IXGBE_RXTXIDX_WRITE;
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, reg);
	IXGBE_WRITE_FLUSH(hw);
}

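/* The Rx registers follow a two-step pattern: the data is first staged
 * in the input registers (IPSRXSPI, IPSRXKEY, etc.), then committed to
 * one of the three Rx tables by kicking IPSRXIDX with the WRITE bit,
 * roughly:
 *
 *	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, spi);	  // stage data
 *	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl); // commit it
 *
 * ixgbe_ipsec_set_rx_sa() and ixgbe_ipsec_set_rx_ip() below use this
 * sequence for the SPI, key, and IP tables respectively.
 */
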
/**
 * ixgbe_ipsec_set_rx_sa - set up the register bits to save SA info
 * @hw: hw specific details
 * @idx: register index to write
 * @spi: security parameter index
 * @key: key byte array
 * @salt: salt bytes
 * @mode: rx decrypt control bits
 * @ip_idx: index into IP table for related IP address
 **/
static void ixgbe_ipsec_set_rx_sa(struct ixgbe_hw *hw, u16 idx, __be32 spi,
				  u32 key[], u32 salt, u32 mode, u32 ip_idx)
{
	int i;

	/* store the SPI (in bigendian) and IPidx */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI,
			(__force u32)cpu_to_le32((__force u32)spi));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, ip_idx);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_spi_tbl);

	/* store the key, salt, and mode */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXKEY(i),
				(__force u32)cpu_to_be32(key[3 - i]));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXSALT, (__force u32)cpu_to_be32(salt));
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXMOD, mode);
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_key_tbl);
}

/**
 * ixgbe_ipsec_set_rx_ip - set up the register bits to save SA IP addr info
 * @hw: hw specific details
 * @idx: register index to write
 * @addr: IP address byte array
 **/
static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
{
	int i;

	/* store the ip address */
	for (i = 0; i < 4; i++)
		IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPADDR(i),
				(__force u32)cpu_to_le32((__force u32)addr[i]));
	IXGBE_WRITE_FLUSH(hw);

	ixgbe_ipsec_set_rx_item(hw, idx, ips_rx_ip_tbl);
}

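/* The IP table always takes a full 128-bit address.  By the convention
 * used elsewhere in this file (see ixgbe_ipsec_add_sa()), an IPv4
 * address sits in the last word with the first three words zeroed:
 *
 *	__be32 addr[4] = { 0, 0, 0, 0 };
 *
 *	addr[3] = ip4_daddr;		// IPv4 goes in word 3
 *	ixgbe_ipsec_set_rx_ip(hw, ip_idx, addr);
 */
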
/**
 * ixgbe_ipsec_clear_hw_tables - because some tables don't get cleared on reset
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 buf[4] = {0, 0, 0, 0};
	u16 idx;

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);

	/* scrub the tables - split the loops for the max of the IP table */
	for (idx = 0; idx < IXGBE_IPSEC_MAX_RX_IP_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
		ixgbe_ipsec_set_rx_ip(hw, idx, (__be32 *)buf);
	}
	for (; idx < IXGBE_IPSEC_MAX_SA_COUNT; idx++) {
		ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
		ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
	}

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;
}

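/* The loop split above reflects the table sizes: the Rx IP table has
 * far fewer slots (IXGBE_IPSEC_MAX_RX_IP_COUNT) than the SA tables
 * (IXGBE_IPSEC_MAX_SA_COUNT), so the IP scrub stops early while the
 * SA scrub continues through the full index range.
 */
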
/**
 * ixgbe_ipsec_stop_data - stop the Tx and Rx sec block data paths
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_data(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link = adapter->link_up;
	u32 t_rdy, r_rdy;
	u32 limit;
	u32 reg;

	/* halt data paths */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_TX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	IXGBE_WRITE_FLUSH(hw);

	/* If there is no link but the Tx FIFO still holds data, we
	 * can't clear the Tx sec block.  Enable MAC loopback so the
	 * FIFO can drain before the block is cleared.
	 */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg |= IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
		mdelay(3);
	}

	/* wait for the paths to empty */
	limit = 20;
	do {
		mdelay(10);
		t_rdy = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT) &
			IXGBE_SECTXSTAT_SECTX_RDY;
		r_rdy = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT) &
			IXGBE_SECRXSTAT_SECRX_RDY;
	} while (!t_rdy && !r_rdy && limit--);

	/* undo loopback if we played with it earlier */
	if (!link) {
		reg = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg &= ~IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg);

		reg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
		reg &= ~IXGBE_HLREG0_LPBK;
		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg);

		IXGBE_WRITE_FLUSH(hw);
	}
}

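/* The drain loop above polls for up to roughly 200 ms (a limit of 20
 * iterations at 10 ms each) for the sec paths to report ready, and the
 * 3 ms delay gives the MAC loopback time to settle when there is no
 * link.
 */
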
/**
 * ixgbe_ipsec_stop_engine - stop and disable the IPsec engine
 * @adapter: board private structure
 **/
static void ixgbe_ipsec_stop_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* disable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, 0);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, 0);

	/* disable the Rx and Tx engines and full packet store-n-forward */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	reg |= IXGBE_SECTXCTRL_SECTX_DIS;
	reg &= ~IXGBE_SECTXCTRL_STORE_FORWARD;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, reg);

	reg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	reg |= IXGBE_SECRXCTRL_SECRX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, reg);

	/* restore the "tx security buffer almost full threshold" to 0x250 */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, 0x250);

	/* Set minimum IFG between packets back to the default 0x1 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x1;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* final set for normal (no ipsec offload) processing */
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_SECTX_DIS);
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, IXGBE_SECRXCTRL_SECRX_DIS);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_start_engine - start and enable the IPsec engine
 * @adapter: board private structure
 *
 * NOTE: this increases power consumption whether being used or not
 **/
static void ixgbe_ipsec_start_engine(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg;

	ixgbe_ipsec_stop_data(adapter);

	/* Set minimum IFG between packets to 3 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg = (reg & 0xfffffff0) | 0x3;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	/* Set "tx security buffer almost full threshold" to 0x15 so that the
	 * almost full indication is generated only after buffer contains at
	 * least an entire jumbo packet.
	 */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	reg = (reg & 0xfffffc00) | 0x15;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXBUFFAF, reg);

	/* restart the data paths by clearing the DISABLE bits */
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, 0);
	IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, IXGBE_SECTXCTRL_STORE_FORWARD);

	/* enable Rx and Tx SA lookup */
	IXGBE_WRITE_REG(hw, IXGBE_IPSTXIDX, IXGBE_RXTXIDX_IPS_EN);
	IXGBE_WRITE_REG(hw, IXGBE_IPSRXIDX, IXGBE_RXTXIDX_IPS_EN);

	IXGBE_WRITE_FLUSH(hw);
}

/**
 * ixgbe_ipsec_restore - restore the ipsec HW settings after a reset
 * @adapter: board private structure
 **/
void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED))
		return;

	/* clean up and restart the engine */
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);
	ixgbe_ipsec_start_engine(adapter);

	/* reload the IP addrs */
	for (i = 0; i < IXGBE_IPSEC_MAX_RX_IP_COUNT; i++) {
		struct rx_ip_sa *ipsa = &ipsec->ip_tbl[i];

		if (ipsa->used)
			ixgbe_ipsec_set_rx_ip(hw, i, ipsa->ipaddr);
	}

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *rsa = &ipsec->rx_tbl[i];
		struct tx_sa *tsa = &ipsec->tx_tbl[i];

		if (rsa->used)
			ixgbe_ipsec_set_rx_sa(hw, i, rsa->xs->id.spi,
					      rsa->key, rsa->salt,
					      rsa->mode, rsa->iptbl_ind);

		if (tsa->used)
			ixgbe_ipsec_set_tx_sa(hw, i, tsa->key, tsa->salt);
	}
}

/**
 * ixgbe_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to ipsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static int ixgbe_ipsec_find_empty_idx(struct ixgbe_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbe_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to ipsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an ipv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static struct xfrm_state *ixgbe_ipsec_find_rx_state(struct ixgbe_ipsec *ipsec,
						    __be32 *daddr, u8 proto,
						    __be32 spi, bool ip4)
{
	struct rx_sa *rsa;
	struct xfrm_state *ret = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		      (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				       sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

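/* Rx SAs are hashed with the SPI as the key (ixgbe_ipsec_add_sa() uses
 * the same key in hash_add_rcu()), so this lookup walks only the bucket
 * for the given SPI and then confirms the SPI, destination address, and
 * protocol before taking a hold on the matching xfrm_state.
 */
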
/**
 * ixgbe_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbe_ipsec_parse_proto_keys(struct xfrm_state *xs,
					u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	const char aes_gcm_name[] = "rfc4106(gcm(aes))";
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a bigendian array of bytes, so
	 * we don't need to do any byteswapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len == 160) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len != 128) {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	} else {
		netdev_info(dev, "IPsec hw offload parameters missing 32 bit salt value\n");
		*mysalt = 0;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}

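/* The AEAD key blob layout assumed above; alg_key_len counts bits, so
 * 160 means a 16-byte key followed by a 4-byte salt:
 *
 *	key_data[0..15]  : 128-bit AES-GCM key -> mykey
 *	key_data[16..19] : 32-bit salt         -> mysalt (0 if absent)
 */
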
/**
 * ixgbe_ipsec_check_mgmt_ip - make sure there is no clash with mgmt IP filters
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 mfval, manc, reg;
	int num_filters = 4;
	bool manc_ipv4;
	u32 bmcipval;
	int i, j;

#define MANC_EN_IPV4_FILTER      BIT(24)
#define MFVAL_IPV4_FILTER_SHIFT  16
#define MFVAL_IPV6_FILTER_SHIFT  24
#define MIPAF_ARR(_m, _n)        (IXGBE_MIPAF + ((_m) * 0x10) + ((_n) * 4))

#define IXGBE_BMCIP(_n)          (0x5050 + ((_n) * 4))
#define IXGBE_BMCIPVAL           0x5060
#define BMCIP_V4                 0x2
#define BMCIP_V6                 0x3
#define BMCIP_MASK               0x3

	manc = IXGBE_READ_REG(hw, IXGBE_MANC);
	manc_ipv4 = !!(manc & MANC_EN_IPV4_FILTER);
	mfval = IXGBE_READ_REG(hw, IXGBE_MFVAL);
	bmcipval = IXGBE_READ_REG(hw, IXGBE_BMCIPVAL);

	if (xs->props.family == AF_INET) {
		/* are there any IPv4 filters to check? */
		if (manc_ipv4) {
			/* the 4 ipv4 filters are all in MIPAF(3, i) */
			for (i = 0; i < num_filters; i++) {
				if (!(mfval & BIT(MFVAL_IPV4_FILTER_SHIFT + i)))
					continue;

				reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
				if (reg == xs->id.daddr.a4)
					return 1;
			}
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
			reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
			if (reg == xs->id.daddr.a4)
				return 1;
		}

	} else {
		/* if there are ipv4 filters, they are in the last ipv6 slot */
		if (manc_ipv4)
			num_filters = 3;

		for (i = 0; i < num_filters; i++) {
			if (!(mfval & BIT(MFVAL_IPV6_FILTER_SHIFT + i)))
				continue;

			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}

		if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
			for (j = 0; j < 4; j++) {
				reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
				if (reg != xs->id.daddr.a6[j])
					break;
			}
			if (j == 4)   /* did we match all 4 words? */
				return 1;
		}
	}

	return 0;
}

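/* A summary of the filter layout probed above: with MANC bit 24 set,
 * the four IPv4 filters all live in MIPAF(3, 0..3) with their valid
 * bits at MFVAL bits 16..19; IPv6 filters occupy MIPAF rows 0..2
 * (0..3 when IPv4 filtering is off), four words per row, with valid
 * bits starting at MFVAL bit 24; and the BMC address in BMCIP(0..3)
 * is typed by the low two bits of BMCIPVAL (0x2 IPv4, 0x3 IPv6).
 */
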
/**
 * ixgbe_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	int checked, match, first;
	u16 sa_idx;
	int ret;
	int i;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (ixgbe_ipsec_check_mgmt_ip(xs)) {
		netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
		return -EINVAL;
	}

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		/* The HW does not have a 1:1 mapping from keys to IP addrs, so
		 * check for a matching IP addr entry in the table.  If the addr
		 * already exists, use it; else find an unused slot and add the
		 * addr.  If one does not exist and there are no unused table
		 * entries, fail the request.
		 */

		/* Find an existing match or first not used, and stop looking
		 * after we've checked all we know we have.
		 */
		checked = 0;
		match = -1;
		first = -1;
		for (i = 0;
		     i < IXGBE_IPSEC_MAX_RX_IP_COUNT &&
		     (checked < ipsec->num_rx_sa || first < 0);
		     i++) {
			if (ipsec->ip_tbl[i].used) {
				if (!memcmp(ipsec->ip_tbl[i].ipaddr,
					    rsa.ipaddr, sizeof(rsa.ipaddr))) {
					match = i;
					break;
				}
				checked++;
			} else if (first < 0) {
				first = i;  /* track the first empty seen */
			}
		}

		if (ipsec->num_rx_sa == 0)
			first = 0;

		if (match >= 0) {
			/* addrs are the same, we should use this one */
			rsa.iptbl_ind = match;
			ipsec->ip_tbl[match].ref_cnt++;

		} else if (first >= 0) {
			/* no matches, but here's an empty slot */
			rsa.iptbl_ind = first;

			memcpy(ipsec->ip_tbl[first].ipaddr,
			       rsa.ipaddr, sizeof(rsa.ipaddr));
			ipsec->ip_tbl[first].ref_cnt = 1;
			ipsec->ip_tbl[first].used = true;

			ixgbe_ipsec_set_rx_ip(hw, rsa.iptbl_ind, rsa.ipaddr);

		} else {
			/* no match and no empty slot */
			netdev_err(dev, "No space for SA in Rx IP SA table\n");
			memset(&rsa, 0, sizeof(rsa));
			return -ENOSPC;
		}

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, rsa.xs->id.spi, rsa.key,
				      rsa.salt, rsa.mode, rsa.iptbl_ind);
		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, tsa.key, tsa.salt);

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	/* enable the engine if not already warmed up */
	if (!(adapter->flags2 & IXGBE_FLAG2_IPSEC_ENABLED)) {
		ixgbe_ipsec_start_engine(adapter);
		adapter->flags2 |= IXGBE_FLAG2_IPSEC_ENABLED;
	}

	return 0;
}

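/* The offload_handle handed back to the stack encodes both direction
 * and table slot: Rx SAs get sa_idx + IXGBE_IPSEC_BASE_RX_INDEX and Tx
 * SAs get sa_idx + IXGBE_IPSEC_BASE_TX_INDEX, so ixgbe_ipsec_del_sa()
 * and ixgbe_ipsec_tx() recover the table index by subtracting the base.
 * From userspace, an offloaded SA is typically requested with
 * iproute2's "offload dev <ethX> dir in|out" clause on "ip xfrm state
 * add" (illustrative; exact syntax depends on the iproute2 version).
 */
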
/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi,
						      (__force __be32 *)zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}

/**
 * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbe_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbe_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbe_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbe_ipsec_offload_ok,
};

/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {

		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

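/* A worked example of the trailer math above, using the fixed 16-byte
 * ICV (IXGBE_IPSEC_AUTH_BITS / 8): if the padlen byte copied out of the
 * packet is 2, then
 *
 *	trailer_len = authlen + 2 + padlen = 16 + 2 + 2 = 20
 *
 * i.e. the ICV plus the proto/padlen pair plus the pad bytes.
 */
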
/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data.
	 * We can assume no vlan header is in the way: the hw would not
	 * have recognized the IPsec packet through one, and the vlan
	 * device doesn't currently support xfrm offload anyway.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbe_init_ipsec_offload - initialize security registers for IPsec operation
 * @adapter: board private structure
 **/
void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec;
	size_t size;

	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
		return;

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	size = sizeof(struct rx_ip_sa) * IXGBE_IPSEC_MAX_RX_IP_COUNT;
	ipsec->ip_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->ip_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;
	ixgbe_ipsec_stop_engine(adapter);
	ixgbe_ipsec_clear_hw_tables(adapter);

	adapter->netdev->xfrmdev_ops = &ixgbe_xfrmdev_ops;

#define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBE_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBE_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->ip_tbl);
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}

/**
 * ixgbe_stop_ipsec_offload - tear down the ipsec offload
 * @adapter: board private structure
 **/
void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->ip_tbl);
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}
1038