/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/netevent.h>

#include "en.h"
#include "eswitch.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en_rep.h"

#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
#define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1

static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}

static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}

static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
{
	struct mlx5e_ipsec_dwork *dwork =
		container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
	struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
	struct xfrm_state *x = sa_entry->x;

	if (sa_entry->attrs.drop)
		return;

	spin_lock_bh(&x->lock);
	xfrm_state_check_expire(x);
	if (x->km.state == XFRM_STATE_EXPIRED) {
		sa_entry->attrs.drop = true;
		spin_unlock_bh(&x->lock);

		mlx5e_accel_ipsec_fs_modify(sa_entry);
		return;
	}
	spin_unlock_bh(&x->lock);

	queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
			   MLX5_IPSEC_RESCHED);
}
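
/* Illustrative timing note (an assumption drawn from the code above, not
 * text from the original source): this work item self-reschedules every
 * MLX5_IPSEC_RESCHED (1s), so xfrm_state_check_expire() runs for each
 * packet-offloaded TX SA roughly once per second. A lifetime that expires
 * between two passes is therefore detected with up to ~1s of latency before
 * attrs.drop is set and mlx5e_accel_ipsec_fs_modify() turns the steering
 * rule into a drop rule.
 */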

static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	u32 seq_bottom = 0;
	u32 esn, esn_msb;
	u8 overlap;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		switch (x->xso.dir) {
		case XFRM_DEV_OFFLOAD_IN:
			esn = x->replay_esn->seq;
			esn_msb = x->replay_esn->seq_hi;
			break;
		case XFRM_DEV_OFFLOAD_OUT:
			esn = x->replay_esn->oseq;
			esn_msb = x->replay_esn->oseq_hi;
			break;
		default:
			WARN_ON(true);
			return false;
		}
		break;
	case XFRM_DEV_OFFLOAD_CRYPTO:
		/* Already parsed by XFRM core */
		esn = x->replay_esn->seq;
		break;
	default:
		WARN_ON(true);
		return false;
	}

	overlap = sa_entry->esn_state.overlap;

	if (esn >= x->replay_esn->replay_window)
		seq_bottom = esn - x->replay_esn->replay_window + 1;

	if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
		esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));

	if (sa_entry->esn_state.esn_msb)
		sa_entry->esn_state.esn = esn;
	else
		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
		 * the first packet sent using a given SA will contain a sequence
		 * number of 1.
		 */
		sa_entry->esn_state.esn = max_t(u32, esn, 1);
	sa_entry->esn_state.esn_msb = esn_msb;

	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}
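
/* Worked example (illustrative, not from the original source), assuming
 * MLX5E_IPSEC_ESN_SCOPE_MID splits the 32-bit sequence space in half at
 * 2^31: while seq_bottom sits in the lower half the SA runs with
 * overlap == 0. The first update that observes seq_bottom >= 2^31 flips
 * overlap to 1 and returns true, prompting the caller to push fresh
 * esn/esn_msb state to the hardware; once the sequence numbers wrap back
 * into the lower half, overlap flips to 0 and another update is triggered.
 */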

static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	s64 start_value, n;

	attrs->lft.hard_packet_limit = x->lft.hard_packet_limit;
	attrs->lft.soft_packet_limit = x->lft.soft_packet_limit;
	if (x->lft.soft_packet_limit == XFRM_INF)
		return;

	/* Compute hard limit initial value and number of rounds.
	 *
	 * The counting pattern of the hardware counter goes:
	 *                value  -> 2^31-1
	 *      2^31  | (2^31-1) -> 2^31-1
	 *      2^31  | (2^31-1) -> 2^31-1
	 *      [..]
	 *      2^31  | (2^31-1) -> 0
	 *
	 * The pattern is created by using an ASO operation to atomically set
	 * bit 31 after the down counter clears bit 31. This is effectively an
	 * atomic addition of 2^31 to the counter.
	 *
	 * We wish to configure the counter, within the above pattern, so that
	 * when it reaches 0, it has hit the hard limit. This is defined by this
	 * system of equations:
	 *
	 *      hard_limit == start_value + n * 2^31
	 *      n >= 0
	 *      start_value < 2^32, start_value >= 0
	 *
	 * These equations are not single-solution, there are often two choices:
	 *      hard_limit == start_value + n * 2^31
	 *      hard_limit == (start_value+2^31) + (n-1) * 2^31
	 *
	 * The algorithm selects the solution that keeps the counter value
	 * above 2^31 until the final iteration.
	 */

	/* Start by estimating n and compute start_value */
	n = attrs->lft.hard_packet_limit / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* Choose the best of the two solutions: */
	if (n >= 1)
		n -= 1;

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* The best solution means: when there are multiple iterations we must
	 * start above 2^31 and count down to 2^31 to get the interrupt.
	 */
	attrs->lft.hard_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_hard = (u64)n;
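
	/* Worked example (illustrative, not part of the original code):
	 * hard_packet_limit == 2^32 gives n == 2 and start_value == 0; the
	 * "n >= 1" branch picks the other solution, n == 1 and
	 * start_value == 2^31. The counter is loaded with 2^31 and the ASO
	 * adds 2^31 exactly once: 2^31 + 1 * 2^31 == 2^32 total packets,
	 * and the counter never dips below 2^31 until the final iteration.
	 */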

	/* Compute soft limit initial value and number of rounds.
	 *
	 * The soft_limit is achieved by adjusting the counter's
	 * interrupt_value. This is embedded in the counting pattern created by
	 * the hard packet calculations above.
	 *
	 * We wish to compute the interrupt_value for the soft_limit. This is
	 * defined by this system of equations:
	 *
	 *      soft_limit == start_value - soft_value + n * 2^31
	 *      n >= 0
	 *      soft_value < 2^32, soft_value >= 0
	 *      for n == 0 start_value > soft_value
	 *
	 * As with the hard limit computation above, the equations are not
	 * single-solution. The algorithm selects the solution that has:
	 *      2^30 <= soft_limit < 2^31 + 2^30
	 * for the interior iterations, which guarantees a large guard band
	 * around the counter hard limit and next interrupt.
	 */

	/* Start by estimating n and compute soft_value */
	n = (x->lft.soft_packet_limit - attrs->lft.hard_packet_limit) / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;

	/* Compare against constraints and adjust n */
	if (n < 0)
		n = 0;
	else if (start_value >= BIT_ULL(32))
		n -= 1;
	else if (start_value < 0)
		n += 1;

	/* Choose the best of the two solutions: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;
	if (n != attrs->lft.numb_rounds_hard && start_value < BIT_ULL(30))
		n += 1;

	/* Note that the upper limit of soft_value happens naturally because we
	 * always select the lowest soft_value.
	 */

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;

	/* The best solution means: when there are multiple iterations we must
	 * not fall below 2^30 as that would get too close to the false
	 * hard_limit, and when we reach an interior iteration for soft_limit
	 * it has to be far away from 2^32-1 which is the counter reset point
	 * after the +2^31 to accommodate latency.
	 */
	attrs->lft.soft_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_soft = (u64)n;
}
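
/* Worked example for the soft-limit math above (illustrative, not part of
 * the original code). Assume hard_packet_limit == 2^32, so the hard counter
 * was programmed to start at 2^31 with numb_rounds_hard == 1, and
 * soft_packet_limit == 2^32 - 2^30. The first estimate n == 0 yields a
 * negative soft_value, so n is bumped to 1; then soft_value ==
 * 2^31 + 1 * 2^31 - (2^32 - 2^30) == 2^30 and numb_rounds_soft == 1. The
 * interrupt fires when the counter reaches 2^30 during the last round,
 * i.e. exactly 2^30 packets before the hard limit, well inside the
 * 2^30 <= value < 2^31 + 2^30 guard band described above.
 */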

static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
				  struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct xfrm_state *x = sa_entry->x;
	struct net_device *netdev;
	struct neighbour *n;
	u8 addr[ETH_ALEN];
	const void *pkey;
	u8 *dst, *src;

	if (attrs->mode != XFRM_MODE_TUNNEL ||
	    attrs->type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	netdev = x->xso.real_dev;

	mlx5_query_mac_address(mdev, addr);
	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		src = attrs->dmac;
		dst = attrs->smac;
		pkey = &attrs->saddr.a4;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		src = attrs->smac;
		dst = attrs->dmac;
		pkey = &attrs->daddr.a4;
		break;
	default:
		return;
	}

	ether_addr_copy(src, addr);
	n = neigh_lookup(&arp_tbl, pkey, netdev);
	if (!n) {
		n = neigh_create(&arp_tbl, pkey, netdev);
		if (IS_ERR(n))
			return;
		neigh_event_send(n, NULL);
		attrs->drop = true;
	} else {
		neigh_ha_snapshot(addr, n, netdev);
		ether_addr_copy(dst, addr);
	}
	neigh_release(n);
}
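
/* Illustrative note (an assumption drawn from the code above, not text from
 * the original source): for an OUT-direction tunnel SA the uplink MAC from
 * mlx5_query_mac_address() becomes the Ethernet source, and the tunnel
 * destination IP is resolved through the ARP neighbour table to obtain the
 * destination MAC. When no neighbour entry exists yet, neigh_event_send()
 * kicks off resolution and the SA starts with attrs->drop == true;
 * mlx5e_ipsec_netevent_event() below later snapshots the resolved address
 * and re-enables the rule.
 */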

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

	/* icv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	attrs->dir = x->xso.dir;

	/* esn */
	if (x->props.flags & XFRM_STATE_ESN) {
		attrs->replay_esn.trigger = true;
		attrs->replay_esn.esn = sa_entry->esn_state.esn;
		attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
		attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
		if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
			goto skip_replay_window;

		switch (x->replay_esn->replay_window) {
		case 32:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
			break;
		case 64:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
			break;
		case 128:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
			break;
		case 256:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
			break;
		default:
			WARN_ON(true);
			return;
		}
	}

skip_replay_window:
	/* spi */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source and destination IPs */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->family = x->props.family;
	attrs->type = x->xso.type;
	attrs->reqid = x->props.reqid;
	attrs->upspec.dport = ntohs(x->sel.dport);
	attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
	attrs->upspec.sport = ntohs(x->sel.sport);
	attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
	attrs->upspec.proto = x->sel.proto;
	attrs->mode = x->props.mode;

	mlx5e_ipsec_init_limits(sa_entry, attrs);
	mlx5e_ipsec_init_macs(sa_entry, attrs);

	if (x->encap) {
		attrs->encap = true;
		attrs->sport = x->encap->encap_sport;
		attrs->dport = x->encap->encap_dport;
	}
}
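
/* Worked example (illustrative, not part of the original source): for
 * "rfc4106(gcm(aes))" with alg_key_len == 288 bits, crypto_data_len is
 * 36 bytes; the leading 32 bytes form the AES-256 key (key_len * 8 == 256)
 * and the trailing 4 bytes are the GCM salt. alg_icv_len == 128 means a
 * 16-byte ICV, so attrs->authsize == 4 dwords.
 */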

static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
				     struct xfrm_state *x,
				     struct netlink_ext_ack *extack)
{
	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->encap) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
			return -EINVAL;
		}

		if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported");
			return -EINVAL;
		}

		if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only");
			return -EINVAL;
		}
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}

	if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
	    x->sel.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
		return -EINVAL;
	}

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
			NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
			return -EINVAL;
		}

		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (!(mlx5_ipsec_device_caps(mdev) &
		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode == XFRM_MODE_TUNNEL &&
		    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported for tunnel mode");
			return -EINVAL;
		}

		if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
		    x->replay_esn->replay_window != 32 &&
		    x->replay_esn->replay_window != 64 &&
		    x->replay_esn->replay_window != 128 &&
		    x->replay_esn->replay_window != 256) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
			return -EINVAL;
		}

		if (!x->props.reqid) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
			return -EINVAL;
		}

		if (x->lft.hard_byte_limit != XFRM_INF ||
		    x->lft.soft_byte_limit != XFRM_INF) {
			NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes");
			return -EINVAL;
		}

		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
		    x->lft.hard_packet_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_packet_limit || !x->lft.hard_packet_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard packet limits can't be 0");
			return -EINVAL;
		}
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}
	return 0;
}
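
/* A state that passes the validation above can be sketched with iproute2
 * (illustrative only; addresses, key and ifname are made-up placeholders,
 * and the exact syntax depends on the iproute2 version):
 *
 *   ip xfrm state add src 192.0.2.1 dst 192.0.2.2 \
 *       proto esp spi 0x1000 reqid 1 mode transport \
 *       aead 'rfc4106(gcm(aes))' 0x<36_byte_hex_key> 128 \
 *       offload packet dev eth2 dir out
 *
 * Dropping "reqid 1" or adding byte-based lifetimes would trip the packet
 * offload checks above, while "offload crypto" skips those extra checks.
 */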

static void mlx5e_ipsec_modify_state(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs;

	mlx5_accel_esp_modify_xfrm(sa_entry, attrs);
}

static void mlx5e_ipsec_set_esn_ops(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;

	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO ||
	    x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
		return;

	if (x->props.flags & XFRM_STATE_ESN) {
		sa_entry->set_iv_op = mlx5e_ipsec_set_iv_esn;
		return;
	}

	sa_entry->set_iv_op = mlx5e_ipsec_set_iv;
}

static void mlx5e_ipsec_handle_netdev_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5e_ipsec_netevent_data *data = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &sa_entry->attrs;

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		ether_addr_copy(attrs->smac, data->addr);
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		ether_addr_copy(attrs->dmac, data->addr);
		break;
	default:
		WARN_ON_ONCE(true);
	}
	attrs->drop = false;
	mlx5e_accel_ipsec_fs_modify(sa_entry);
}

static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_work *work;
	void *data = NULL;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(x->props.flags & XFRM_STATE_ESN))
			return 0;
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (x->props.mode != XFRM_MODE_TUNNEL)
			return 0;
		break;
	default:
		break;
	}

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		data = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		data = kzalloc(sizeof(struct mlx5e_ipsec_netevent_data),
			       GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
		break;
	default:
		break;
	}

	work->data = data;
	work->sa_entry = sa_entry;
	sa_entry->work = work;
	return 0;

free_work:
	kfree(work);
	return -ENOMEM;
}

static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_dwork *dwork;

	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
		return 0;

	if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
		return 0;

	if (x->lft.soft_packet_limit == XFRM_INF &&
	    x->lft.hard_packet_limit == XFRM_INF)
		return 0;

	dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
	if (!dwork)
		return -ENOMEM;

	dwork->sa_entry = sa_entry;
	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
	sa_entry->dwork = dwork;
	return 0;
}

static int mlx5e_xfrm_add_state(struct xfrm_state *x,
				struct netlink_ext_ack *extack)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_priv *priv;
	gfp_t gfp;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	ipsec = priv->ipsec;
	gfp = (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) ? GFP_ATOMIC : GFP_KERNEL;
	sa_entry = kzalloc(sizeof(*sa_entry), gfp);
	if (!sa_entry)
		return -ENOMEM;

	sa_entry->x = x;
	sa_entry->ipsec = ipsec;
	/* Check if this SA is originated from acquire flow temporary SA */
	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto out;

	err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
	if (err)
		goto err_xfrm;

	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto err_xfrm;
	}

	/* check esn */
	if (x->props.flags & XFRM_STATE_ESN)
		mlx5e_ipsec_update_esn_state(sa_entry);

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);

	err = mlx5_ipsec_create_work(sa_entry);
	if (err)
		goto unblock_ipsec;

	err = mlx5e_ipsec_create_dwork(sa_entry);
	if (err)
		goto release_work;

	/* create hw context */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto release_dwork;

	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
	if (err)
		goto err_hw_ctx;

	if (x->props.mode == XFRM_MODE_TUNNEL &&
	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
		err = -EINVAL;
		goto err_add_rule;
	}

	/* We use *_bh() variant because xfrm_timer_handler(), which runs
	 * in softirq context, can reach our state delete logic and we need
	 * xa_erase_bh() there.
	 */
	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
			   GFP_KERNEL);
	if (err)
		goto err_add_rule;

	mlx5e_ipsec_set_esn_ops(sa_entry);

	if (sa_entry->dwork)
		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
				   MLX5_IPSEC_RESCHED);

	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    x->props.mode == XFRM_MODE_TUNNEL)
		xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
			    MLX5E_IPSEC_TUNNEL_SA);

out:
	x->xso.offload_handle = (unsigned long)sa_entry;
	return 0;

err_add_rule:
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
	mlx5_ipsec_free_sa_ctx(sa_entry);
release_dwork:
	kfree(sa_entry->dwork);
release_work:
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
unblock_ipsec:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
	kfree(sa_entry);
	NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
	return err;
}

static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *old;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);

	if (attrs->mode == XFRM_MODE_TUNNEL &&
	    attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		/* Make sure that no ARP requests are running in parallel */
		flush_workqueue(ipsec->wq);
}

static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto sa_entry_free;

	if (sa_entry->work)
		cancel_work_sync(&sa_entry->work->work);

	if (sa_entry->dwork)
		cancel_delayed_work_sync(&sa_entry->dwork->dwork);

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry->dwork);
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
	mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
	kfree(sa_entry);
}

static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_netevent_data *data;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec *ipsec;
	struct neighbour *n = ptr;
	struct net_device *netdev;
	struct xfrm_state *x;
	unsigned long idx;

	if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
		return NOTIFY_DONE;

	ipsec = container_of(nb, struct mlx5e_ipsec, netevent_nb);
	xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
		attrs = &sa_entry->attrs;

		if (attrs->family == AF_INET) {
			if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
			    !neigh_key_eq32(n, &attrs->daddr.a4))
				continue;
		} else {
			if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
			    !neigh_key_eq128(n, &attrs->daddr.a4))
				continue;
		}

		x = sa_entry->x;
		netdev = x->xso.real_dev;
		data = sa_entry->work->data;

		neigh_ha_snapshot(data->addr, n, netdev);
		queue_work(ipsec->wq, &sa_entry->work->work);
	}

	return NOTIFY_DONE;
}
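
/* Illustrative note (an assumption based on the code above, not text from
 * the original source): only SAs tagged with MLX5E_IPSEC_TUNNEL_SA at add
 * time are visited here, so each neighbour update is matched against tunnel
 * endpoints alone. The MAC snapshot is taken in atomic notifier context,
 * while the firmware modify is deferred to ipsec->wq via the per-SA work
 * item initialized in mlx5_ipsec_create_work().
 */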

void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec;
	int ret = -ENOMEM;

	if (!mlx5_ipsec_device_caps(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return;

	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
	ipsec->mdev = priv->mdev;
	ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
				    priv->netdev->name);
	if (!ipsec->wq)
		goto err_wq;

	if (mlx5_ipsec_device_caps(priv->mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
		ret = mlx5e_ipsec_aso_init(ipsec);
		if (ret)
			goto err_aso;
	}

	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) {
		ipsec->netevent_nb.notifier_call = mlx5e_ipsec_netevent_event;
		ret = register_netevent_notifier(&ipsec->netevent_nb);
		if (ret)
			goto clear_aso;
	}

	ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
	ret = mlx5e_accel_ipsec_fs_init(ipsec);
	if (ret)
		goto err_fs_init;

	ipsec->fs = priv->fs;
	priv->ipsec = ipsec;
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return;

err_fs_init:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
		unregister_netevent_notifier(&ipsec->netevent_nb);
clear_aso:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
	destroy_workqueue(ipsec->wq);
err_wq:
	kfree(ipsec);
	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
	return;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (ipsec->netevent_nb.notifier_call) {
		unregister_netevent_notifier(&ipsec->netevent_nb);
		ipsec->netevent_nb.notifier_call = NULL;
	}
	if (ipsec->aso)
		mlx5e_ipsec_aso_cleanup(ipsec);
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}

static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_work *work = sa_entry->work;
	struct mlx5e_ipsec_sa_entry *sa_entry_shadow;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	sa_entry_shadow = work->data;
	memset(sa_entry_shadow, 0x00, sizeof(*sa_entry_shadow));
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry_shadow->attrs);
	queue_work(sa_entry->ipsec->wq, &work->work);
}
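
/* Illustrative note (an assumption based on the code above, not text from
 * the original source): work->data holds a shadow mlx5e_ipsec_sa_entry, so
 * the refreshed ESN attrs are rebuilt into the shadow and applied by
 * mlx5e_ipsec_modify_state() on the workqueue, leaving the live
 * sa_entry->attrs untouched until the firmware modify actually runs.
 */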

static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	u64 packets, bytes, lastuse;

	lockdep_assert(lockdep_is_held(&x->lock) ||
		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
	x->curlft.packets += packets;
	x->curlft.bytes += bytes;
}
mlx5e_xfrm_validate_policy(struct mlx5_core_dev * mdev,struct xfrm_policy * x,struct netlink_ext_ack * extack)998fa5aa2f8SPaul Blakey static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
999fa5aa2f8SPaul Blakey 				      struct xfrm_policy *x,
10001bb70c5aSLeon Romanovsky 				      struct netlink_ext_ack *extack)
1001a5b8ca94SLeon Romanovsky {
1002b3beba1fSRaed Salem 	struct xfrm_selector *sel = &x->selector;
1003b3beba1fSRaed Salem 
1004a5b8ca94SLeon Romanovsky 	if (x->type != XFRM_POLICY_TYPE_MAIN) {
10051bb70c5aSLeon Romanovsky 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
1006a5b8ca94SLeon Romanovsky 		return -EINVAL;
1007a5b8ca94SLeon Romanovsky 	}
1008a5b8ca94SLeon Romanovsky 
1009a5b8ca94SLeon Romanovsky 	/* Only one xfrm template is supported */
1010a5b8ca94SLeon Romanovsky 	if (x->xfrm_nr > 1) {
10111bb70c5aSLeon Romanovsky 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
1012a5b8ca94SLeon Romanovsky 		return -EINVAL;
1013a5b8ca94SLeon Romanovsky 	}
1014a5b8ca94SLeon Romanovsky 
1015a5b8ca94SLeon Romanovsky 	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
1016a5b8ca94SLeon Romanovsky 	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
10171bb70c5aSLeon Romanovsky 		NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
1018a5b8ca94SLeon Romanovsky 		return -EINVAL;
1019a5b8ca94SLeon Romanovsky 	}
1020a5b8ca94SLeon Romanovsky 
1021b3beba1fSRaed Salem 	if (!x->xfrm_vec[0].reqid && sel->proto == IPPROTO_IP &&
1022b3beba1fSRaed Salem 	    addr6_all_zero(sel->saddr.a6) && addr6_all_zero(sel->daddr.a6)) {
1023b3beba1fSRaed Salem 		NL_SET_ERR_MSG_MOD(extack, "Unsupported policy with reqid 0 without at least one of upper protocol or ip addr(s) different from 0");
1024a5b8ca94SLeon Romanovsky 		return -EINVAL;
1025a5b8ca94SLeon Romanovsky 	}
1026a5b8ca94SLeon Romanovsky 
1027a5b8ca94SLeon Romanovsky 	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
10281bb70c5aSLeon Romanovsky 		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
1029a5b8ca94SLeon Romanovsky 		return -EINVAL;
1030a5b8ca94SLeon Romanovsky 	}
1031a5b8ca94SLeon Romanovsky 
1032b8c697e1SLeon Romanovsky 	if (sel->proto != IPPROTO_IP &&
1033b8c697e1SLeon Romanovsky 	    sel->proto != IPPROTO_UDP &&
1034b8c697e1SLeon Romanovsky 	    sel->proto != IPPROTO_TCP) {
1035b8c697e1SLeon Romanovsky 		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
1036a7385187SRaed Salem 		return -EINVAL;
1037a7385187SRaed Salem 	}
1038a7385187SRaed Salem 
1039fa5aa2f8SPaul Blakey 	if (x->priority) {
1040fa5aa2f8SPaul Blakey 		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
1041fa5aa2f8SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack, "Device does not support policy priority");
1042fa5aa2f8SPaul Blakey 			return -EINVAL;
1043fa5aa2f8SPaul Blakey 		}
1044fa5aa2f8SPaul Blakey 
1045fa5aa2f8SPaul Blakey 		if (x->priority == U32_MAX) {
1046fa5aa2f8SPaul Blakey 			NL_SET_ERR_MSG_MOD(extack, "Device does not support requested policy priority");
1047fa5aa2f8SPaul Blakey 			return -EINVAL;
1048fa5aa2f8SPaul Blakey 		}
1049fa5aa2f8SPaul Blakey 	}
1050fa5aa2f8SPaul Blakey 
10517e46db5eSChris Mi 	/* x->xdo.type is already known to be XFRM_DEV_OFFLOAD_PACKET here */
10527e46db5eSChris Mi 	if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
10537e46db5eSChris Mi 		NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
10547e46db5eSChris Mi 		return -EINVAL;
10557e46db5eSChris Mi 	}
10567e46db5eSChris Mi 
1057a5b8ca94SLeon Romanovsky 	return 0;
1058a5b8ca94SLeon Romanovsky }
1059a5b8ca94SLeon Romanovsky 
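/* Translate the policy selector and metadata into the
 * mlx5_accel_pol_xfrm_attrs layout consumed by the flow-steering code.
 */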
1060a5b8ca94SLeon Romanovsky static void
1061a5b8ca94SLeon Romanovsky mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
1062a5b8ca94SLeon Romanovsky 				  struct mlx5_accel_pol_xfrm_attrs *attrs)
1063a5b8ca94SLeon Romanovsky {
1064a5b8ca94SLeon Romanovsky 	struct xfrm_policy *x = pol_entry->x;
1065a5b8ca94SLeon Romanovsky 	struct xfrm_selector *sel;
1066a5b8ca94SLeon Romanovsky 
1067a5b8ca94SLeon Romanovsky 	sel = &x->selector;
1068a5b8ca94SLeon Romanovsky 	memset(attrs, 0, sizeof(*attrs));
1069a5b8ca94SLeon Romanovsky 
1070a5b8ca94SLeon Romanovsky 	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
1071a5b8ca94SLeon Romanovsky 	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
1072a5b8ca94SLeon Romanovsky 	attrs->family = sel->family;
1073a5b8ca94SLeon Romanovsky 	attrs->dir = x->xdo.dir;
1074a5b8ca94SLeon Romanovsky 	attrs->action = x->action;
1075a5b8ca94SLeon Romanovsky 	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
107667212396SLeon Romanovsky 	attrs->reqid = x->xfrm_vec[0].reqid;
1077a7385187SRaed Salem 	attrs->upspec.dport = ntohs(sel->dport);
1078a7385187SRaed Salem 	attrs->upspec.dport_mask = ntohs(sel->dport_mask);
1079a7385187SRaed Salem 	attrs->upspec.sport = ntohs(sel->sport);
1080a7385187SRaed Salem 	attrs->upspec.sport_mask = ntohs(sel->sport_mask);
1081a7385187SRaed Salem 	attrs->upspec.proto = sel->proto;
1082fa5aa2f8SPaul Blakey 	attrs->prio = x->priority;
1083a5b8ca94SLeon Romanovsky }
1084a5b8ca94SLeon Romanovsky 
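/* xdo_dev_policy_add callback: validate the policy, block eswitch mode
 * changes for the lifetime of the offload (-EBUSY if that fails), then
 * install the flow-steering rule implementing the policy.
 */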
10853089386dSLeon Romanovsky static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
10863089386dSLeon Romanovsky 				 struct netlink_ext_ack *extack)
1087a5b8ca94SLeon Romanovsky {
1088a5b8ca94SLeon Romanovsky 	struct net_device *netdev = x->xdo.real_dev;
1089a5b8ca94SLeon Romanovsky 	struct mlx5e_ipsec_pol_entry *pol_entry;
1090a5b8ca94SLeon Romanovsky 	struct mlx5e_priv *priv;
1091a5b8ca94SLeon Romanovsky 	int err;
1092a5b8ca94SLeon Romanovsky 
1093a5b8ca94SLeon Romanovsky 	priv = netdev_priv(netdev);
10941bb70c5aSLeon Romanovsky 	if (!priv->ipsec) {
10951bb70c5aSLeon Romanovsky 		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
1096a5b8ca94SLeon Romanovsky 		return -EOPNOTSUPP;
10971bb70c5aSLeon Romanovsky 	}
1098a5b8ca94SLeon Romanovsky 
1099fa5aa2f8SPaul Blakey 	err = mlx5e_xfrm_validate_policy(priv->mdev, x, extack);
1100a5b8ca94SLeon Romanovsky 	if (err)
1101a5b8ca94SLeon Romanovsky 		return err;
1102a5b8ca94SLeon Romanovsky 
1103a5b8ca94SLeon Romanovsky 	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
1104a5b8ca94SLeon Romanovsky 	if (!pol_entry)
1105a5b8ca94SLeon Romanovsky 		return -ENOMEM;
1106a5b8ca94SLeon Romanovsky 
1107a5b8ca94SLeon Romanovsky 	pol_entry->x = x;
1108a5b8ca94SLeon Romanovsky 	pol_entry->ipsec = priv->ipsec;
1109a5b8ca94SLeon Romanovsky 
11108efd7b17SLeon Romanovsky 	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
11118efd7b17SLeon Romanovsky 		err = -EBUSY;
11128efd7b17SLeon Romanovsky 		goto ipsec_busy;
11138efd7b17SLeon Romanovsky 	}
11148efd7b17SLeon Romanovsky 
1115a5b8ca94SLeon Romanovsky 	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
1116a5b8ca94SLeon Romanovsky 	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
1117a5b8ca94SLeon Romanovsky 	if (err)
1118a5b8ca94SLeon Romanovsky 		goto err_fs;
1119a5b8ca94SLeon Romanovsky 
1120a5b8ca94SLeon Romanovsky 	x->xdo.offload_handle = (unsigned long)pol_entry;
1121a5b8ca94SLeon Romanovsky 	return 0;
1122a5b8ca94SLeon Romanovsky 
1123a5b8ca94SLeon Romanovsky err_fs:
11248efd7b17SLeon Romanovsky 	mlx5_eswitch_unblock_ipsec(priv->mdev);
11258efd7b17SLeon Romanovsky ipsec_busy:
1126a5b8ca94SLeon Romanovsky 	kfree(pol_entry);
11271bb70c5aSLeon Romanovsky 	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
1128a5b8ca94SLeon Romanovsky 	return err;
1129a5b8ca94SLeon Romanovsky }
1130a5b8ca94SLeon Romanovsky 
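/* xdo_dev_policy_delete callback: tear down the flow-steering rule and
 * release the eswitch block taken in mlx5e_xfrm_add_policy().
 */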
1131cf5bb023SLeon Romanovsky static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
1132a5b8ca94SLeon Romanovsky {
1133a5b8ca94SLeon Romanovsky 	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
1134a5b8ca94SLeon Romanovsky 
1135a5b8ca94SLeon Romanovsky 	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
11368efd7b17SLeon Romanovsky 	mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
1137cf5bb023SLeon Romanovsky }
1138cf5bb023SLeon Romanovsky 
1139cf5bb023SLeon Romanovsky static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
1140cf5bb023SLeon Romanovsky {
1141cf5bb023SLeon Romanovsky 	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
1142cf5bb023SLeon Romanovsky 
1143a5b8ca94SLeon Romanovsky 	kfree(pol_entry);
1144a5b8ca94SLeon Romanovsky }
1145a5b8ca94SLeon Romanovsky 
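/* Entry points through which the xfrm stack drives SA and policy offload.
 * xdo_dev_offload_ok() vetoes offload of skbs the hardware cannot parse
 * (IPv4 options, IPv6 extension headers); the remaining callbacks manage
 * hardware state and policy lifetime.
 */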
1146547eede0SIlan Tayari static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
1147547eede0SIlan Tayari 	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
1148547eede0SIlan Tayari 	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
1149547eede0SIlan Tayari 	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
11502ac9cfe7SIlan Tayari 	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
1151cb010083SAviad Yehezkel 	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
1152a5b8ca94SLeon Romanovsky 
11531ed78fc0SLeon Romanovsky 	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
1154a5b8ca94SLeon Romanovsky 	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
1155cf5bb023SLeon Romanovsky 	.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
1156a5b8ca94SLeon Romanovsky 	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
1157a5b8ca94SLeon Romanovsky };
1158a5b8ca94SLeon Romanovsky 
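/* Advertise IPsec offload features on the netdev. ESP offload is exposed
 * whenever the device reports IPsec caps; checksum and GSO of ESP frames
 * are additionally gated on the SWP (software parser) csum and LSO caps.
 */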
1159547eede0SIlan Tayari void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
1160547eede0SIlan Tayari {
1161547eede0SIlan Tayari 	struct mlx5_core_dev *mdev = priv->mdev;
1162547eede0SIlan Tayari 	struct net_device *netdev = priv->netdev;
1163547eede0SIlan Tayari 
1164a8444b0bSLeon Romanovsky 	if (!mlx5_ipsec_device_caps(mdev))
1165a8444b0bSLeon Romanovsky 		return;
1166a8444b0bSLeon Romanovsky 
1167547eede0SIlan Tayari 	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
1168a5b8ca94SLeon Romanovsky 
1169547eede0SIlan Tayari 	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
1170547eede0SIlan Tayari 	netdev->features |= NETIF_F_HW_ESP;
1171547eede0SIlan Tayari 	netdev->hw_enc_features |= NETIF_F_HW_ESP;
1172547eede0SIlan Tayari 
1173547eede0SIlan Tayari 	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
1174547eede0SIlan Tayari 		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
1175547eede0SIlan Tayari 		return;
1176547eede0SIlan Tayari 	}
1177547eede0SIlan Tayari 
1178547eede0SIlan Tayari 	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
1179547eede0SIlan Tayari 	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;
11802ac9cfe7SIlan Tayari 
1181effbe267SLeon Romanovsky 	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
11822ac9cfe7SIlan Tayari 		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
11832ac9cfe7SIlan Tayari 		return;
11842ac9cfe7SIlan Tayari 	}
11852ac9cfe7SIlan Tayari 
11865be01904SRaed Salem 	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
11872ac9cfe7SIlan Tayari 	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
11882ac9cfe7SIlan Tayari 	netdev->features |= NETIF_F_GSO_ESP;
11892ac9cfe7SIlan Tayari 	netdev->hw_features |= NETIF_F_GSO_ESP;
11902ac9cfe7SIlan Tayari 	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
1191547eede0SIlan Tayari }
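/* Illustrative only (addresses, SPI and key below are placeholders): once
 * these features are advertised, a packet-offloaded SA can be requested
 * from user space with iproute2, e.g.:
 *
 *   ip xfrm state add src 192.168.1.1 dst 192.168.1.2 \
 *       proto esp spi 0x1 reqid 1 mode transport \
 *       aead 'rfc4106(gcm(aes))' 0x<key+salt> 128 \
 *       offload packet dev eth0 dir out
 */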