/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>

#include "en.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"

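/* The xfrm core stores our per-entry context as an opaque offload handle;
 * these helpers recover the typed SA and policy entries from it.
 */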
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}

static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}

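/* Recompute the ESN (Extended Sequence Number) high bits from the bottom of
 * the replay window and track which half of the 32-bit sequence space the
 * window currently sits in. Returns true when the "overlap" flag flipped,
 * i.e. the hardware context needs to be updated with the new ESN state.
 */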
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_replay_state_esn *replay_esn;
	u32 seq_bottom = 0;
	u8 overlap;

	if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) {
		sa_entry->esn_state.trigger = 0;
		return false;
	}

	replay_esn = sa_entry->x->replay_esn;
	if (replay_esn->seq >= replay_esn->replay_window)
		seq_bottom = replay_esn->seq - replay_esn->replay_window + 1;

	overlap = sa_entry->esn_state.overlap;

	sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x,
						    htonl(seq_bottom));

	sa_entry->esn_state.trigger = 1;
	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}

static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;

	attrs->hard_packet_limit = x->lft.hard_packet_limit;
	if (x->lft.soft_packet_limit == XFRM_INF)
		return;

	/* Hardware decrements the hard_packet_limit counter as it
	 * operates, while it fires an event when soft_packet_limit
	 * is reached. This means we need to subtract the numbers
	 * in order to count the soft limit properly.
	 *
	 * As an example:
	 * The XFRM user sets a soft limit of 2 and a hard limit of 9,
	 * expecting a soft event after 2 packets and a hard event
	 * after 9 packets. In our case, the hard limit will be set
	 * to 9 and the soft limit comparator to 7, so the user gets
	 * the soft event after 2 packets.
	 */
	attrs->soft_packet_limit =
		x->lft.hard_packet_limit - x->lft.soft_packet_limit;
}

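/* Translate a software xfrm_state into the attribute block that is passed
 * to the device when creating the IPsec offload object.
 */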
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

	/* icv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	/* esn */
	if (sa_entry->esn_state.trigger) {
		attrs->esn_trigger = true;
		attrs->esn = sa_entry->esn_state.esn;
		attrs->esn_overlap = sa_entry->esn_state.overlap;
		attrs->replay_window = x->replay_esn->replay_window;
	}

	attrs->dir = x->xso.dir;
	/* spi */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source and destination IPs */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->family = x->props.family;
	attrs->type = x->xso.type;
	attrs->reqid = x->props.reqid;

	mlx5e_ipsec_init_limits(sa_entry, attrs);
}

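/* Reject any xfrm state configuration that the device cannot offload.
 * Called from mlx5e_xfrm_add_state() before any resources are allocated.
 */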
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
{
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);

	if (x->props.aalgo != SADB_AALG_NONE) {
		netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		netdev_info(netdev, "Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_ESN)) {
		netdev_info(netdev, "Cannot offload ESN xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		netdev_info(netdev, "Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}
	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(mlx5_ipsec_device_caps(priv->mdev) &
		      MLX5_IPSEC_CAP_CRYPTO)) {
			netdev_info(netdev, "Crypto offload is not supported\n");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT &&
		    x->props.mode != XFRM_MODE_TUNNEL) {
			netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
			return -EINVAL;
		}
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (!(mlx5_ipsec_device_caps(priv->mdev) &
		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
			netdev_info(netdev, "Packet offload is not supported\n");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT) {
			netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n");
			return -EINVAL;
		}

		if (x->replay_esn && x->replay_esn->replay_window != 32 &&
		    x->replay_esn->replay_window != 64 &&
		    x->replay_esn->replay_window != 128 &&
		    x->replay_esn->replay_window != 256) {
			netdev_info(netdev,
				    "Unsupported replay window size %u\n",
				    x->replay_esn->replay_window);
			return -EINVAL;
		}

		if (!x->props.reqid) {
			netdev_info(netdev, "Cannot offload without reqid\n");
			return -EINVAL;
		}

		if (x->lft.hard_byte_limit != XFRM_INF ||
		    x->lft.soft_byte_limit != XFRM_INF) {
			netdev_info(netdev,
				    "Device doesn't support limits in bytes\n");
			return -EINVAL;
		}

		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
		    x->lft.hard_packet_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			netdev_info(netdev,
				    "Hard packet limit must be greater than soft one\n");
			return -EINVAL;
		}
		break;
	default:
		netdev_info(netdev, "Unsupported xfrm offload type %d\n",
			    x->xso.type);
		return -EINVAL;
	}
	return 0;
}

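/* Worker that pushes updated ESN attributes to the firmware. Queued from
 * mlx5e_xfrm_advance_esn_state(), which may run in atomic context where
 * the firmware command cannot be issued directly.
 */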
static void _update_xfrm_state(struct work_struct *work)
{
	struct mlx5e_ipsec_modify_state_work *modify_work =
		container_of(work, struct mlx5e_ipsec_modify_state_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = container_of(
		modify_work, struct mlx5e_ipsec_sa_entry, modify_work);

	mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
}

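/* xdo_dev_state_add callback: validate the state, allocate the driver SA
 * entry, create the hardware SA context and steering rule, and publish the
 * entry in the SADB xarray keyed by the IPsec object ID.
 */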
static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	ipsec = priv->ipsec;
	err = mlx5e_xfrm_validate_state(x);
	if (err)
		return err;

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		err = -ENOMEM;
		goto out;
	}

	sa_entry->x = x;
	sa_entry->ipsec = ipsec;

	/* check esn */
	mlx5e_ipsec_update_esn_state(sa_entry);

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);
	/* create hw context */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto err_xfrm;

	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
	if (err)
		goto err_hw_ctx;

	/* We use *_bh() variant because xfrm_timer_handler(), which runs
	 * in softirq context, can reach our state delete logic and we need
	 * xa_erase_bh() there.
	 */
	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
			   GFP_KERNEL);
	if (err)
		goto err_add_rule;

	if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT)
		sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
				mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;

	INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
	x->xso.offload_handle = (unsigned long)sa_entry;
	return 0;

err_add_rule:
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
	mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
	kfree(sa_entry);
out:
	return err;
}

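/* xdo_dev_state_delete callback: unpublish the SA so no new traffic can
 * resolve to it. The hardware resources are released later, in
 * mlx5e_xfrm_free_state(), once the xfrm core drops its last reference.
 */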
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *old;

	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);
}

static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);

	cancel_work_sync(&sa_entry->modify_work.work);
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry);
}

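/* Allocate the per-netdev IPsec context: SADB xarray, ordered workqueue,
 * ASO resources (packet offload only) and steering tables. On failure the
 * netdev simply comes up without IPsec offload.
 */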
void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec;
	int ret = -ENOMEM;

	if (!mlx5_ipsec_device_caps(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return;

	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
	ipsec->mdev = priv->mdev;
	ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
					    priv->netdev->name);
	if (!ipsec->wq)
		goto err_wq;

	if (mlx5_ipsec_device_caps(priv->mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
		ret = mlx5e_ipsec_aso_init(ipsec);
		if (ret)
			goto err_aso;
	}

	ret = mlx5e_accel_ipsec_fs_init(ipsec);
	if (ret)
		goto err_fs_init;

	ipsec->fs = priv->fs;
	priv->ipsec = ipsec;
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return;

err_fs_init:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
	destroy_workqueue(ipsec->wq);
err_wq:
	kfree(ipsec);
	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
	return;
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}

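/* xdo_dev_offload_ok callback: per-packet fast-path check; decline offload
 * for packets whose IP headers the hardware cannot handle.
 */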
static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

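/* xdo_dev_state_advance_esn callback: may run in atomic context, so the
 * actual firmware update is deferred to the ordered workqueue.
 */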
static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_modify_state_work *modify_work =
		&sa_entry->modify_work;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &modify_work->attrs);
	queue_work(sa_entry->ipsec->wq, &modify_work->work);
}

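/* xdo_dev_state_update_curlft callback: refresh x->curlft.packets from the
 * hardware ASO counters. Called with the state lock held.
 */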
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	int err;

	lockdep_assert_held(&x->lock);

	if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
		/* Limits are not configured, as the soft limit
		 * must be lower than the hard limit.
		 */
		return;

	err = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (err)
		return;

	mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
}

static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
{
	struct net_device *netdev = x->xdo.real_dev;

	if (x->type != XFRM_POLICY_TYPE_MAIN) {
		netdev_info(netdev, "Cannot offload non-main policy types\n");
		return -EINVAL;
	}

	/* Note that we support only one template */
	if (x->xfrm_nr > 1) {
		netdev_info(netdev, "Cannot offload more than one template\n");
		return -EINVAL;
	}

	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
		netdev_info(netdev, "Cannot offload forward policy\n");
		return -EINVAL;
	}

	if (!x->xfrm_vec[0].reqid) {
		netdev_info(netdev, "Cannot offload policy without reqid\n");
		return -EINVAL;
	}

	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
		netdev_info(netdev, "Unsupported xfrm offload type\n");
		return -EINVAL;
	}

	return 0;
}

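/* Convert the policy selector and direction into the attribute block used
 * to install the packet-offload steering rule.
 */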
static void
mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
				  struct mlx5_accel_pol_xfrm_attrs *attrs)
{
	struct xfrm_policy *x = pol_entry->x;
	struct xfrm_selector *sel;

	sel = &x->selector;
	memset(attrs, 0, sizeof(*attrs));

	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
	attrs->family = sel->family;
	attrs->dir = x->xdo.dir;
	attrs->action = x->action;
	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
	attrs->reqid = x->xfrm_vec[0].reqid;
}

static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
{
	struct net_device *netdev = x->xdo.real_dev;
	struct mlx5e_ipsec_pol_entry *pol_entry;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	err = mlx5e_xfrm_validate_policy(x);
	if (err)
		return err;

	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
	if (!pol_entry)
		return -ENOMEM;

	pol_entry->x = x;
	pol_entry->ipsec = priv->ipsec;

	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
	if (err)
		goto err_fs;

	x->xdo.offload_handle = (unsigned long)pol_entry;
	return 0;

err_fs:
	kfree(pol_entry);
	return err;
}

static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
	kfree(pol_entry);
}

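/* Crypto-only offload implements just the state callbacks; full packet
 * offload additionally handles policies and hardware lifetime counters.
 */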
static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
};

static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
	.xdo_dev_state_add	= mlx5e_xfrm_add_state,
	.xdo_dev_state_delete	= mlx5e_xfrm_del_state,
	.xdo_dev_state_free	= mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok	= mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,

	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
};

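/* Advertise ESP offload features on the netdev according to the device
 * capabilities: checksum and GSO bits are only set when the respective
 * software-parser capabilities are present.
 */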
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!mlx5_ipsec_device_caps(mdev))
		return;

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
	else
		netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;

	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}