// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */

#include "ixgbevf.h"
#include <net/xfrm.h>
#include <crypto/aead.h>

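/* 160 bits of key material: a 128-bit AES-GCM key followed by a 32-bit salt */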
#define IXGBE_IPSEC_KEY_BITS  160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";

/**
 * ixgbevf_ipsec_set_pf_sa - ask the PF to set up an SA
 * @adapter: board private structure
 * @xs: xfrm info to be sent to the PF
 *
 * Returns: positive offload handle from the PF, or negative error code
 **/
static int ixgbevf_ipsec_set_pf_sa(struct ixgbevf_adapter *adapter,
				   struct xfrm_state *xs)
{
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	struct sa_mbx_msg *sam;
	int ret;

	/* send the important bits to the PF */
	sam = (struct sa_mbx_msg *)(&msgbuf[1]);
	sam->dir = xs->xso.dir;
	sam->spi = xs->id.spi;
	sam->proto = xs->id.proto;
	sam->family = xs->props.family;

	if (xs->props.family == AF_INET6)
		memcpy(sam->addr, &xs->id.daddr.a6, sizeof(xs->id.daddr.a6));
	else
		memcpy(sam->addr, &xs->id.daddr.a4, sizeof(xs->id.daddr.a4));
	memcpy(sam->key, xs->aead->alg_key, sizeof(sam->key));

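	/* msgbuf[0] carries the mailbox opcode; the SA description starts
	 * at msgbuf[1], where the PF will place the offload handle
	 */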
	msgbuf[0] = IXGBE_VF_IPSEC_ADD;

	spin_lock_bh(&adapter->mbx_lock);

	ret = ixgbevf_write_mbx(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
	if (ret)
		goto out;

	ret = ixgbevf_poll_mbx(hw, msgbuf, 2);
	if (ret)
		goto out;

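	/* on success the PF returns the SA index (offload handle) in
	 * msgbuf[1]; a failure is flagged in msgbuf[0]
	 */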
	ret = (int)msgbuf[1];
	if (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE && ret >= 0)
		ret = -1;

out:
	spin_unlock_bh(&adapter->mbx_lock);

	return ret;
}

/**
 * ixgbevf_ipsec_del_pf_sa - ask the PF to delete an SA
 * @adapter: board private structure
 * @pfsa: sa index returned from PF when created, -1 for all
 *
 * Returns: 0 on success, or negative error code
 **/
static int ixgbevf_ipsec_del_pf_sa(struct ixgbevf_adapter *adapter, int pfsa)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 msgbuf[2];
	int err;

	memset(msgbuf, 0, sizeof(msgbuf));
	msgbuf[0] = IXGBE_VF_IPSEC_DEL;
	msgbuf[1] = (u32)pfsa;

	spin_lock_bh(&adapter->mbx_lock);

	err = ixgbevf_write_mbx(hw, msgbuf, 2);
	if (err)
		goto out;

	err = ixgbevf_poll_mbx(hw, msgbuf, 2);
	if (err)
		goto out;

out:
	spin_unlock_bh(&adapter->mbx_lock);
	return err;
}

/**
 * ixgbevf_ipsec_restore - restore the IPsec HW settings after a reset
 * @adapter: board private structure
 *
 * Reload the HW tables from the SW tables after they've been bashed
 * by a chip reset.  While we're here, make sure any stale VF data is
 * removed, since we go through reset when num_vfs changes.
 **/
void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct net_device *netdev = adapter->netdev;
	int i;

	if (!(adapter->netdev->features & NETIF_F_HW_ESP))
		return;

	/* reload the Rx and Tx keys */
	for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
		struct rx_sa *r = &ipsec->rx_tbl[i];
		struct tx_sa *t = &ipsec->tx_tbl[i];
		int ret;

		if (r->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, r->xs);
			if (ret < 0)
				netdev_err(netdev, "reload rx_tbl[%d] failed = %d\n",
					   i, ret);
		}

		if (t->used) {
			ret = ixgbevf_ipsec_set_pf_sa(adapter, t->xs);
			if (ret < 0)
				netdev_err(netdev, "reload tx_tbl[%d] failed = %d\n",
					   i, ret);
		}
	}
}

/**
 * ixgbevf_ipsec_find_empty_idx - find the first unused security parameter index
 * @ipsec: pointer to IPsec struct
 * @rxtable: true if we need to look in the Rx table
 *
 * Returns the first unused index in either the Rx or Tx SA table
 **/
static
int ixgbevf_ipsec_find_empty_idx(struct ixgbevf_ipsec *ipsec, bool rxtable)
{
	u32 i;

	if (rxtable) {
		if (ipsec->num_rx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search rx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->rx_tbl[i].used)
				return i;
		}
	} else {
		if (ipsec->num_tx_sa == IXGBE_IPSEC_MAX_SA_COUNT)
			return -ENOSPC;

		/* search tx sa table */
		for (i = 0; i < IXGBE_IPSEC_MAX_SA_COUNT; i++) {
			if (!ipsec->tx_tbl[i].used)
				return i;
		}
	}

	return -ENOSPC;
}

/**
 * ixgbevf_ipsec_find_rx_state - find the state that matches
 * @ipsec: pointer to IPsec struct
 * @daddr: inbound address to match
 * @proto: protocol to match
 * @spi: SPI to match
 * @ip4: true if using an IPv4 address
 *
 * Returns a pointer to the matching SA state information
 **/
static
struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
					       __be32 *daddr, u8 proto,
					       __be32 spi, bool ip4)
{
	struct xfrm_state *ret = NULL;
	struct rx_sa *rsa;

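	/* Rx SAs are hashed by SPI; walk the matching bucket under RCU and
	 * compare the full (SPI, destination address, protocol) tuple
	 */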
	rcu_read_lock();
	hash_for_each_possible_rcu(ipsec->rx_sa_list, rsa, hlist,
				   (__force u32)spi) {
		if (spi == rsa->xs->id.spi &&
		    ((ip4 && *daddr == rsa->xs->id.daddr.a4) ||
		      (!ip4 && !memcmp(daddr, &rsa->xs->id.daddr.a6,
				       sizeof(rsa->xs->id.daddr.a6)))) &&
		    proto == rsa->xs->id.proto) {
			ret = rsa->xs;
			xfrm_state_hold(ret);
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/**
 * ixgbevf_ipsec_parse_proto_keys - find the key and salt based on the protocol
 * @xs: pointer to xfrm_state struct
 * @mykey: pointer to key array to populate
 * @mysalt: pointer to salt value to populate
 *
 * This copies the protocol keys and salt to our own data tables.  The
 * 82599 family only supports the one algorithm.
 **/
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
					  u32 *mykey, u32 *mysalt)
{
	struct net_device *dev = xs->xso.real_dev;
	unsigned char *key_data;
	char *alg_name = NULL;
	int key_len;

	if (!xs->aead) {
		netdev_err(dev, "Unsupported IPsec algorithm\n");
		return -EINVAL;
	}

	if (xs->aead->alg_icv_len != IXGBE_IPSEC_AUTH_BITS) {
		netdev_err(dev, "IPsec offload requires %d bit authentication\n",
			   IXGBE_IPSEC_AUTH_BITS);
		return -EINVAL;
	}

	key_data = &xs->aead->alg_key[0];
	key_len = xs->aead->alg_key_len;
	alg_name = xs->aead->alg_name;

	if (strcmp(alg_name, aes_gcm_name)) {
		netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
			   aes_gcm_name);
		return -EINVAL;
	}

	/* The key bytes come down in a big endian array of bytes, so
	 * we don't need to do any byte swapping.
	 * 160 accounts for 16 byte key and 4 byte salt
	 */
	if (key_len > IXGBE_IPSEC_KEY_BITS) {
		*mysalt = ((u32 *)key_data)[4];
	} else if (key_len == IXGBE_IPSEC_KEY_BITS) {
		*mysalt = 0;
	} else {
		netdev_err(dev, "IPsec hw offload only supports keys up to 128 bits with a 32 bit salt\n");
		return -EINVAL;
	}
	memcpy(mykey, key_data, 16);

	return 0;
}

/**
 * ixgbevf_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 **/
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbevf_adapter *adapter;
	struct ixgbevf_ipsec *ipsec;
	u16 sa_idx;
	int ret;

	adapter = netdev_priv(dev);
	ipsec = adapter->ipsec;

	if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
		netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
			   xs->id.proto);
		return -EINVAL;
	}

	if (xs->props.mode != XFRM_MODE_TRANSPORT) {
		netdev_err(dev, "Unsupported mode for ipsec offload\n");
		return -EINVAL;
	}

	if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
		netdev_err(dev, "Unsupported ipsec offload type\n");
		return -EINVAL;
	}

	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
		struct rx_sa rsa;

		if (xs->calg) {
			netdev_err(dev, "Compression offload not supported\n");
			return -EINVAL;
		}

		/* find the first unused index */
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Rx table!\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&rsa, 0, sizeof(rsa));
		rsa.used = true;
		rsa.xs = xs;

		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.decrypt = xs->ealg || xs->aead;

		/* get the key and salt */
		ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Rx SA table\n");
			return ret;
		}

		/* get ip for rx sa table */
		if (xs->props.family == AF_INET6)
			memcpy(rsa.ipaddr, &xs->id.daddr.a6, 16);
		else
			memcpy(&rsa.ipaddr[3], &xs->id.daddr.a4, 4);

		rsa.mode = IXGBE_RXMOD_VALID;
		if (rsa.xs->id.proto & IPPROTO_ESP)
			rsa.mode |= IXGBE_RXMOD_PROTO_ESP;
		if (rsa.decrypt)
			rsa.mode |= IXGBE_RXMOD_DECRYPT;
		if (rsa.xs->props.family == AF_INET6)
			rsa.mode |= IXGBE_RXMOD_IPV6;

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		rsa.pfsa = ret;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->rx_tbl[sa_idx], &rsa, sizeof(rsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_RX_INDEX;

		ipsec->num_rx_sa++;

		/* hash the new entry for faster search in Rx path */
		hash_add_rcu(ipsec->rx_sa_list, &ipsec->rx_tbl[sa_idx].hlist,
			     (__force u32)rsa.xs->id.spi);
	} else {
		struct tx_sa tsa;

		/* find the first unused index */
		ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
		if (ret < 0) {
			netdev_err(dev, "No space for SA in Tx table\n");
			return ret;
		}
		sa_idx = (u16)ret;

		memset(&tsa, 0, sizeof(tsa));
		tsa.used = true;
		tsa.xs = xs;

		if (xs->id.proto & IPPROTO_ESP)
			tsa.encrypt = xs->ealg || xs->aead;

		ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
		if (ret) {
			netdev_err(dev, "Failed to get key data for Tx SA table\n");
			memset(&tsa, 0, sizeof(tsa));
			return ret;
		}

		ret = ixgbevf_ipsec_set_pf_sa(adapter, xs);
		if (ret < 0)
			return ret;
		tsa.pfsa = ret;

		/* the preparations worked, so save the info */
		memcpy(&ipsec->tx_tbl[sa_idx], &tsa, sizeof(tsa));

		xs->xso.offload_handle = sa_idx + IXGBE_IPSEC_BASE_TX_INDEX;

		ipsec->num_tx_sa++;
	}

	return 0;
}

/**
 * ixgbevf_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.real_dev;
	struct ixgbevf_adapter *adapter;
	struct ixgbevf_ipsec *ipsec;
	u16 sa_idx;

	adapter = netdev_priv(dev);
	ipsec = adapter->ipsec;

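	/* the offload handle encodes the SA table index plus the Rx/Tx base
	 * offset assigned in ixgbevf_ipsec_add_sa()
	 */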
	if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;

		if (!ipsec->rx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->rx_tbl[sa_idx].pfsa);
		hash_del_rcu(&ipsec->rx_tbl[sa_idx].hlist);
		memset(&ipsec->rx_tbl[sa_idx], 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbevf_ipsec_del_pf_sa(adapter, ipsec->tx_tbl[sa_idx].pfsa);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}
}

/**
 * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	if (xs->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl != 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = {
	.xdo_dev_state_add = ixgbevf_ipsec_add_sa,
	.xdo_dev_state_delete = ixgbevf_ipsec_del_sa,
	.xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok,
};

/**
 * ixgbevf_ipsec_tx - setup Tx flags for IPsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
		     struct ixgbevf_tx_buffer *first,
		     struct ixgbevf_ipsec_tx_data *itd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct sec_path *sp;
	struct tx_sa *tsa;
	u16 sa_idx;

	sp = skb_sec_path(first->skb);
	if (unlikely(!sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

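	/* offload_handle was set to sa_idx + IXGBE_IPSEC_BASE_TX_INDEX in
	 * ixgbevf_ipsec_add_sa(); recover the Tx table index from it
	 */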
	sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, sa_idx);
		return 0;
	}

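	/* convert the PF's SA handle, which includes the Tx base offset,
	 * into the index carried in the Tx context descriptor
	 */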
	itd->pfsa = tsa->pfsa - IXGBE_IPSEC_BASE_TX_INDEX;

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CSUM;

	if (xs->id.proto == IPPROTO_ESP) {
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}

/**
 * ixgbevf_ipsec_rx - decode IPsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an IPsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring,
		      union ixgbe_adv_rx_desc *rx_desc,
		      struct sk_buff *skb)
{
	struct ixgbevf_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	struct sec_path *sp;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data.
	 * We can assume no VLAN header in the way, b/c the
	 * hw won't recognize the IPsec packet and anyway the
	 * VLAN device doesn't currently support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbevf_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	sp = secpath_set(skb);
	if (unlikely(!sp))
		return;

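	/* record the offloaded state in the sec_path so the xfrm stack
	 * knows the hardware has already handled the crypto
	 */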
	sp->xvec[sp->len++] = xs;
	sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}

/**
 * ixgbevf_init_ipsec_offload - initialize registers for IPsec operation
 * @adapter: board private structure
 **/
void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec;
	size_t size;

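	/* only mailbox API 1.4 and 1.5 carry the IPsec offload messages */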
	switch (adapter->hw.api_version) {
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_15:
		break;
	default:
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		goto err1;
	hash_init(ipsec->rx_sa_list);

	size = sizeof(struct rx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->rx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->rx_tbl)
		goto err2;

	size = sizeof(struct tx_sa) * IXGBE_IPSEC_MAX_SA_COUNT;
	ipsec->tx_tbl = kzalloc(size, GFP_KERNEL);
	if (!ipsec->tx_tbl)
		goto err2;

	ipsec->num_rx_sa = 0;
	ipsec->num_tx_sa = 0;

	adapter->ipsec = ipsec;

	adapter->netdev->xfrmdev_ops = &ixgbevf_xfrmdev_ops;

#define IXGBEVF_ESP_FEATURES	(NETIF_F_HW_ESP | \
				 NETIF_F_HW_ESP_TX_CSUM | \
				 NETIF_F_GSO_ESP)

	adapter->netdev->features |= IXGBEVF_ESP_FEATURES;
	adapter->netdev->hw_enc_features |= IXGBEVF_ESP_FEATURES;

	return;

err2:
	kfree(ipsec->rx_tbl);
	kfree(ipsec->tx_tbl);
	kfree(ipsec);
err1:
	netdev_err(adapter->netdev, "Unable to allocate memory for SA tables\n");
}

/**
 * ixgbevf_stop_ipsec_offload - tear down the IPsec offload
 * @adapter: board private structure
 **/
void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter)
{
	struct ixgbevf_ipsec *ipsec = adapter->ipsec;

	adapter->ipsec = NULL;
	if (ipsec) {
		kfree(ipsec->rx_tbl);
		kfree(ipsec->tx_tbl);
		kfree(ipsec);
	}
}