/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/internal/geniv.h>
#include <crypto/aead.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <net/netevent.h>

#include "en.h"
#include "eswitch.h"
#include "ipsec.h"
#include "ipsec_rxtx.h"
#include "en_rep.h"

#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
#define MLX5E_IPSEC_TUNNEL_SA XA_MARK_1

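/* The driver's per-SA and per-policy contexts are stashed in the xfrm
 * offload handles (x->xso.offload_handle and x->xdo.offload_handle) by
 * mlx5e_xfrm_add_state()/mlx5e_xfrm_add_policy(); the helpers below
 * recover them on subsequent callbacks.
 */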
static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
{
	return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
}

static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
{
	return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}

static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
{
	struct mlx5e_ipsec_dwork *dwork =
		container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
	struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
	struct xfrm_state *x = sa_entry->x;

	if (sa_entry->attrs.drop)
		return;

	spin_lock_bh(&x->lock);
	xfrm_state_check_expire(x);
	if (x->km.state == XFRM_STATE_EXPIRED) {
		sa_entry->attrs.drop = true;
		spin_unlock_bh(&x->lock);

		mlx5e_accel_ipsec_fs_modify(sa_entry);
		return;
	}
	spin_unlock_bh(&x->lock);

	queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
			   MLX5_IPSEC_RESCHED);
}

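/* Track which half of the 32-bit ESN sequence space the replay window
 * sits in. Returns true when the overlap flag flipped (the window
 * crossed MLX5E_IPSEC_ESN_SCOPE_MID), i.e. the caller must push the
 * new esn/esn_msb/overlap values to the device.
 */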
static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	u32 seq_bottom = 0;
	u32 esn, esn_msb;
	u8 overlap;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		switch (x->xso.dir) {
		case XFRM_DEV_OFFLOAD_IN:
			esn = x->replay_esn->seq;
			esn_msb = x->replay_esn->seq_hi;
			break;
		case XFRM_DEV_OFFLOAD_OUT:
			esn = x->replay_esn->oseq;
			esn_msb = x->replay_esn->oseq_hi;
			break;
		default:
			WARN_ON(true);
			return false;
		}
		break;
	case XFRM_DEV_OFFLOAD_CRYPTO:
		/* Already parsed by XFRM core */
		esn = x->replay_esn->seq;
		break;
	default:
		WARN_ON(true);
		return false;
	}

	overlap = sa_entry->esn_state.overlap;

	if (esn >= x->replay_esn->replay_window)
		seq_bottom = esn - x->replay_esn->replay_window + 1;

	if (x->xso.type == XFRM_DEV_OFFLOAD_CRYPTO)
		esn_msb = xfrm_replay_seqhi(x, htonl(seq_bottom));

	if (sa_entry->esn_state.esn_msb)
		sa_entry->esn_state.esn = esn;
	else
		/* According to RFC4303, section "3.3.3. Sequence Number Generation",
		 * the first packet sent using a given SA will contain a sequence
		 * number of 1.
		 */
		sa_entry->esn_state.esn = max_t(u32, esn, 1);
	sa_entry->esn_state.esn_msb = esn_msb;

	if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) {
		sa_entry->esn_state.overlap = 0;
		return true;
	} else if (unlikely(!overlap &&
			    (seq_bottom >= MLX5E_IPSEC_ESN_SCOPE_MID))) {
		sa_entry->esn_state.overlap = 1;
		return true;
	}

	return false;
}

static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
				    struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	s64 start_value, n;

	attrs->lft.hard_packet_limit = x->lft.hard_packet_limit;
	attrs->lft.soft_packet_limit = x->lft.soft_packet_limit;
	if (x->lft.soft_packet_limit == XFRM_INF)
		return;

	/* Compute hard limit initial value and number of rounds.
	 *
	 * The counting pattern of hardware counter goes:
	 *                value  -> 2^31-1
	 *      2^31 | (2^31-1)  -> 2^31-1
	 *      2^31 | (2^31-1)  -> 2^31-1
	 *      [..]
	 *      2^31 | (2^31-1)  -> 0
	 *
	 * The pattern is created by using an ASO operation to atomically set
	 * bit 31 after the down counter clears bit 31. This is effectively an
	 * atomic addition of 2^31 to the counter.
	 *
	 * We wish to configure the counter, within the above pattern, so that
	 * when it reaches 0, it has hit the hard limit. This is defined by this
	 * system of equations:
	 *
	 *      hard_limit == start_value + n * 2^31
	 *      n >= 0
	 *      start_value < 2^32, start_value >= 0
	 *
	 * These equations are not single-solution, there are often two choices:
	 *      hard_limit == start_value + n * 2^31
	 *      hard_limit == (start_value+2^31) + (n-1) * 2^31
	 *
	 * The algorithm selects the solution that keeps the counter value
	 * above 2^31 until the final iteration.
	 */

	/* Start by estimating n and compute start_value */
	n = attrs->lft.hard_packet_limit / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* Choose the best of the two solutions: */
	if (n >= 1)
		n -= 1;

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit - n * BIT_ULL(31);

	/* The best solution means: when there are multiple iterations we must
	 * start above 2^31 and count down to 2^31 to get the interrupt.
	 */
	attrs->lft.hard_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_hard = (u64)n;
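	/* For example, hard_packet_limit == 5 * 2^31 + 100 gives an
	 * estimate of n == 5, reduced to n == 4 above, so
	 * start_value == 2^31 + 100. The counter is loaded with
	 * 2^31 + 100 and re-armed with +2^31 four times:
	 * (2^31 + 100) + 4 * 2^31 == 5 * 2^31 + 100 packets in total,
	 * while staying above 2^31 until the final round.
	 */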

	/* Compute soft limit initial value and number of rounds.
	 *
	 * The soft_limit is achieved by adjusting the counter's
	 * interrupt_value. This is embedded in the counting pattern created by
	 * hard packet calculations above.
	 *
	 * We wish to compute the interrupt_value for the soft_limit. This is
	 * defined by this system of equations:
	 *
	 *      soft_limit == start_value - soft_value + n * 2^31
	 *      n >= 0
	 *      soft_value < 2^32, soft_value >= 0
	 *      for n == 0 start_value > soft_value
	 *
	 * As with compute_hard_n_value() the equations are not single-solution.
	 * The algorithm selects the solution that has:
	 *      2^30 <= soft_limit < 2^31 + 2^30
	 * for the interior iterations, which guarantees a large guard band
	 * around the counter hard limit and next interrupt.
	 */

	/* Start by estimating n and compute soft_value */
	n = (x->lft.soft_packet_limit - attrs->lft.hard_packet_limit) / BIT_ULL(31);
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) -
		      x->lft.soft_packet_limit;

	/* Compare against constraints and adjust n */
	if (n < 0)
		n = 0;
	else if (start_value >= BIT_ULL(32))
		n -= 1;
	else if (start_value < 0)
		n += 1;

	/* Choose the best of the two solutions: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;
	if (n != attrs->lft.numb_rounds_hard && start_value < BIT_ULL(30))
		n += 1;

	/* Note that the upper limit of soft_value happens naturally because we
	 * always select the lowest soft_value.
	 */

	/* Computed values solve the system of equations: */
	start_value = attrs->lft.hard_packet_limit + n * BIT_ULL(31) - start_value;

	/* The best solution means: when there are multiple iterations we must
	 * not fall below 2^30 as that would get too close to the false
	 * hard_limit and when we reach an interior iteration for soft_limit it
	 * has to be far away from 2^32-1 which is the counter reset point
	 * after the +2^31 to accommodate latency.
	 */
	attrs->lft.soft_packet_limit = lower_32_bits(start_value);
	attrs->lft.numb_rounds_soft = (u64)n;
}

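/* In tunnel-mode packet offload the device builds the outer Ethernet
 * header itself, so both outer MACs must be known up front: the local
 * port MAC fills one side and an ARP neighbour lookup fills the other.
 * If the neighbour is unresolved, the SA is installed in drop mode and
 * mlx5e_ipsec_netevent_event() below later patches in the MAC and
 * clears the drop.
 */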
static void mlx5e_ipsec_init_macs(struct mlx5e_ipsec_sa_entry *sa_entry,
				  struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct xfrm_state *x = sa_entry->x;
	struct net_device *netdev;
	struct neighbour *n;
	u8 addr[ETH_ALEN];
	const void *pkey;
	u8 *dst, *src;

	if (attrs->mode != XFRM_MODE_TUNNEL ||
	    attrs->type != XFRM_DEV_OFFLOAD_PACKET)
		return;

	netdev = x->xso.real_dev;

	mlx5_query_mac_address(mdev, addr);
	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		src = attrs->dmac;
		dst = attrs->smac;
		pkey = &attrs->saddr.a4;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		src = attrs->smac;
		dst = attrs->dmac;
		pkey = &attrs->daddr.a4;
		break;
	default:
		return;
	}

	ether_addr_copy(src, addr);
	n = neigh_lookup(&arp_tbl, pkey, netdev);
	if (!n) {
		n = neigh_create(&arp_tbl, pkey, netdev);
		if (IS_ERR(n))
			return;
		neigh_event_send(n, NULL);
		attrs->drop = true;
	} else {
		neigh_ha_snapshot(addr, n, netdev);
		ether_addr_copy(dst, addr);
	}
	neigh_release(n);
}

void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
					struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct xfrm_state *x = sa_entry->x;
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	struct aead_geniv_ctx *geniv_ctx;
	struct crypto_aead *aead;
	unsigned int crypto_data_len, key_len;
	int ivsize;

	memset(attrs, 0, sizeof(*attrs));

	/* key */
	crypto_data_len = (x->aead->alg_key_len + 7) / 8;
	key_len = crypto_data_len - 4; /* 4 bytes salt at end */

	memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
	aes_gcm->key_len = key_len * 8;

	/* salt and seq_iv */
	aead = x->data;
	geniv_ctx = crypto_aead_ctx(aead);
	ivsize = crypto_aead_ivsize(aead);
	memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
	memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
	       sizeof(aes_gcm->salt));

	attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */

	/* icv len */
	aes_gcm->icv_len = x->aead->alg_icv_len;

	attrs->dir = x->xso.dir;

	/* esn */
	if (x->props.flags & XFRM_STATE_ESN) {
		attrs->replay_esn.trigger = true;
		attrs->replay_esn.esn = sa_entry->esn_state.esn;
		attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
		attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
		if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
			goto skip_replay_window;

		switch (x->replay_esn->replay_window) {
		case 32:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
			break;
		case 64:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
			break;
		case 128:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
			break;
		case 256:
			attrs->replay_esn.replay_window =
				MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
			break;
		default:
			WARN_ON(true);
			return;
		}
	}

skip_replay_window:
	/* spi */
	attrs->spi = be32_to_cpu(x->id.spi);

	/* source and destination IPs */
	memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
	attrs->family = x->props.family;
	attrs->type = x->xso.type;
	attrs->reqid = x->props.reqid;
	attrs->upspec.dport = ntohs(x->sel.dport);
	attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
	attrs->upspec.sport = ntohs(x->sel.sport);
	attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
	attrs->upspec.proto = x->sel.proto;
	attrs->mode = x->props.mode;

	mlx5e_ipsec_init_limits(sa_entry, attrs);
	mlx5e_ipsec_init_macs(sa_entry, attrs);

	if (x->encap) {
		attrs->encap = true;
		attrs->sport = x->encap->encap_sport;
		attrs->dport = x->encap->encap_dport;
	}
}

static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
				     struct xfrm_state *x,
				     struct netlink_ext_ack *extack)
{
	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->encap) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is not supported");
			return -EINVAL;
		}

		if (x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation other than UDP is not supported");
			return -EINVAL;
		}

		if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in packet offload mode only");
			return -EINVAL;
		}

		if (x->props.mode != XFRM_MODE_TRANSPORT) {
			NL_SET_ERR_MSG_MOD(extack, "Encapsulation is supported in transport mode only");
			return -EINVAL;
		}
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}

	if (x->sel.proto != IPPROTO_IP && x->sel.proto != IPPROTO_UDP &&
	    x->sel.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->props.mode != XFRM_MODE_TRANSPORT && x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
		return -EINVAL;
	}

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
			NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
			return -EINVAL;
		}

		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (!(mlx5_ipsec_device_caps(mdev) &
		      MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
			return -EINVAL;
		}

		if (x->props.mode == XFRM_MODE_TUNNEL &&
		    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)) {
			NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported for tunnel mode");
			return -EINVAL;
		}

		if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
		    x->replay_esn->replay_window != 32 &&
		    x->replay_esn->replay_window != 64 &&
		    x->replay_esn->replay_window != 128 &&
		    x->replay_esn->replay_window != 256) {
			NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
			return -EINVAL;
		}

		if (!x->props.reqid) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
			return -EINVAL;
		}

		if (x->lft.hard_byte_limit != XFRM_INF ||
		    x->lft.soft_byte_limit != XFRM_INF) {
			NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes");
			return -EINVAL;
		}

		if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
		    x->lft.hard_packet_limit != XFRM_INF) {
			/* XFRM stack doesn't prevent such configuration :(. */
			NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
			return -EINVAL;
		}

		if (!x->lft.soft_packet_limit || !x->lft.hard_packet_limit) {
			NL_SET_ERR_MSG_MOD(extack, "Soft/hard packet limits can't be 0");
			return -EINVAL;
		}
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}
	return 0;
}

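/* ESN advance runs from the driver workqueue: the xfrm core invokes
 * xdo_dev_state_advance_esn() from the packet path, so the updated
 * attrs are staged in a preallocated shadow sa_entry (work->data,
 * allocated in mlx5_ipsec_create_work()) and written to hardware here.
 */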
static void mlx5e_ipsec_modify_state(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &((struct mlx5e_ipsec_sa_entry *)work->data)->attrs;

	mlx5_accel_esp_modify_xfrm(sa_entry, attrs);
}

static void mlx5e_ipsec_set_esn_ops(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;

	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO ||
	    x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
		return;

	if (x->props.flags & XFRM_STATE_ESN) {
		sa_entry->set_iv_op = mlx5e_ipsec_set_iv_esn;
		return;
	}

	sa_entry->set_iv_op = mlx5e_ipsec_set_iv;
}

static void mlx5e_ipsec_handle_netdev_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->sa_entry;
	struct mlx5e_ipsec_netevent_data *data = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;

	attrs = &sa_entry->attrs;

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		ether_addr_copy(attrs->smac, data->addr);
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		ether_addr_copy(attrs->dmac, data->addr);
		break;
	default:
		WARN_ON_ONCE(true);
	}
	attrs->drop = false;
	mlx5e_accel_ipsec_fs_modify(sa_entry);
}

static int mlx5_ipsec_create_work(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_work *work;
	void *data = NULL;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		if (!(x->props.flags & XFRM_STATE_ESN))
			return 0;
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (x->props.mode != XFRM_MODE_TUNNEL)
			return 0;
		break;
	default:
		break;
	}

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	switch (x->xso.type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		data = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_modify_state);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		data = kzalloc(sizeof(struct mlx5e_ipsec_netevent_data),
			       GFP_KERNEL);
		if (!data)
			goto free_work;

		INIT_WORK(&work->work, mlx5e_ipsec_handle_netdev_event);
		break;
	default:
		break;
	}

	work->data = data;
	work->sa_entry = sa_entry;
	sa_entry->work = work;
	return 0;

free_work:
	kfree(work);
	return -ENOMEM;
}

static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct xfrm_state *x = sa_entry->x;
	struct mlx5e_ipsec_dwork *dwork;

	if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
		return 0;

	if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
		return 0;

	if (x->lft.soft_packet_limit == XFRM_INF &&
	    x->lft.hard_packet_limit == XFRM_INF)
		return 0;

	dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
	if (!dwork)
		return -ENOMEM;

	dwork->sa_entry = sa_entry;
	INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
	sa_entry->dwork = dwork;
	return 0;
}

static int mlx5e_xfrm_add_state(struct xfrm_state *x,
				struct netlink_ext_ack *extack)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
	struct net_device *netdev = x->xso.real_dev;
	struct mlx5e_ipsec *ipsec;
	struct mlx5e_priv *priv;
	gfp_t gfp;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec)
		return -EOPNOTSUPP;

	ipsec = priv->ipsec;
	gfp = (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) ? GFP_ATOMIC : GFP_KERNEL;
	sa_entry = kzalloc(sizeof(*sa_entry), gfp);
	if (!sa_entry)
		return -ENOMEM;

	sa_entry->x = x;
	sa_entry->ipsec = ipsec;
	/* Check if this SA is originated from acquire flow temporary SA */
	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto out;

	err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
	if (err)
		goto err_xfrm;

	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto err_xfrm;
	}

	/* check esn */
	if (x->props.flags & XFRM_STATE_ESN)
		mlx5e_ipsec_update_esn_state(sa_entry);

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs);

	err = mlx5_ipsec_create_work(sa_entry);
	if (err)
		goto unblock_ipsec;

	err = mlx5e_ipsec_create_dwork(sa_entry);
	if (err)
		goto release_work;

	/* create hw context */
	err = mlx5_ipsec_create_sa_ctx(sa_entry);
	if (err)
		goto release_dwork;

	err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
	if (err)
		goto err_hw_ctx;

	if (x->props.mode == XFRM_MODE_TUNNEL &&
	    x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !mlx5e_ipsec_fs_tunnel_enabled(sa_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload tunnel mode is disabled due to encap settings");
		err = -EINVAL;
		goto err_add_rule;
	}

	/* We use *_bh() variant because xfrm_timer_handler(), which runs
	 * in softirq context, can reach our state delete logic and we need
	 * xa_erase_bh() there.
	 */
	err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
			   GFP_KERNEL);
	if (err)
		goto err_add_rule;

	mlx5e_ipsec_set_esn_ops(sa_entry);

	if (sa_entry->dwork)
		queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork,
				   MLX5_IPSEC_RESCHED);

	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET &&
	    x->props.mode == XFRM_MODE_TUNNEL)
		xa_set_mark(&ipsec->sadb, sa_entry->ipsec_obj_id,
			    MLX5E_IPSEC_TUNNEL_SA);

out:
	x->xso.offload_handle = (unsigned long)sa_entry;
	return 0;

err_add_rule:
	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
err_hw_ctx:
	mlx5_ipsec_free_sa_ctx(sa_entry);
release_dwork:
	kfree(sa_entry->dwork);
release_work:
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
unblock_ipsec:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
err_xfrm:
	kfree(sa_entry);
	NL_SET_ERR_MSG_WEAK_MOD(extack, "Device failed to offload this state");
	return err;
}

static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_sa_entry *old;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
	WARN_ON(old != sa_entry);

	if (attrs->mode == XFRM_MODE_TUNNEL &&
	    attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		/* Make sure that no ARP requests are running in parallel */
		flush_workqueue(ipsec->wq);
}

static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		goto sa_entry_free;

	if (sa_entry->work)
		cancel_work_sync(&sa_entry->work->work);

	if (sa_entry->dwork)
		cancel_delayed_work_sync(&sa_entry->dwork->dwork);

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	mlx5_ipsec_free_sa_ctx(sa_entry);
	kfree(sa_entry->dwork);
	if (sa_entry->work)
		kfree(sa_entry->work->data);
	kfree(sa_entry->work);
	mlx5_eswitch_unblock_ipsec(ipsec->mdev);
sa_entry_free:
	kfree(sa_entry);
}

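/* Neighbour-update notifier: when a neighbour entry matching either
 * endpoint of a tunnel-mode SA (marked MLX5E_IPSEC_TUNNEL_SA in the
 * SADB) becomes valid, snapshot its MAC and queue the per-SA work to
 * patch the offloaded rule.
 */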
static int mlx5e_ipsec_netevent_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_netevent_data *data;
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec *ipsec;
	struct neighbour *n = ptr;
	struct net_device *netdev;
	struct xfrm_state *x;
	unsigned long idx;

	if (event != NETEVENT_NEIGH_UPDATE || !(n->nud_state & NUD_VALID))
		return NOTIFY_DONE;

	ipsec = container_of(nb, struct mlx5e_ipsec, netevent_nb);
	xa_for_each_marked(&ipsec->sadb, idx, sa_entry, MLX5E_IPSEC_TUNNEL_SA) {
		attrs = &sa_entry->attrs;

		if (attrs->family == AF_INET) {
			if (!neigh_key_eq32(n, &attrs->saddr.a4) &&
			    !neigh_key_eq32(n, &attrs->daddr.a4))
				continue;
		} else {
			if (!neigh_key_eq128(n, &attrs->saddr.a4) &&
			    !neigh_key_eq128(n, &attrs->daddr.a4))
				continue;
		}

		x = sa_entry->x;
		netdev = x->xso.real_dev;
		data = sa_entry->work->data;

		neigh_ha_snapshot(data->addr, n, netdev);
		queue_work(ipsec->wq, &sa_entry->work->work);
	}

	return NOTIFY_DONE;
}

void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec;
	int ret = -ENOMEM;

	if (!mlx5_ipsec_device_caps(priv->mdev)) {
		netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
		return;
	}

	ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
	if (!ipsec)
		return;

	xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
	ipsec->mdev = priv->mdev;
	ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
				    priv->netdev->name);
	if (!ipsec->wq)
		goto err_wq;

	if (mlx5_ipsec_device_caps(priv->mdev) &
	    MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
		ret = mlx5e_ipsec_aso_init(ipsec);
		if (ret)
			goto err_aso;
	}

	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL) {
		ipsec->netevent_nb.notifier_call = mlx5e_ipsec_netevent_event;
		ret = register_netevent_notifier(&ipsec->netevent_nb);
		if (ret)
			goto clear_aso;
	}

	ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
	ret = mlx5e_accel_ipsec_fs_init(ipsec);
	if (ret)
		goto err_fs_init;

	ipsec->fs = priv->fs;
	priv->ipsec = ipsec;
	netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
	return;

err_fs_init:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_TUNNEL)
		unregister_netevent_notifier(&ipsec->netevent_nb);
clear_aso:
	if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
		mlx5e_ipsec_aso_cleanup(ipsec);
err_aso:
	destroy_workqueue(ipsec->wq);
err_wq:
	kfree(ipsec);
	mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
}

void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;

	if (!ipsec)
		return;

	mlx5e_accel_ipsec_fs_cleanup(ipsec);
	if (ipsec->netevent_nb.notifier_call) {
		unregister_netevent_notifier(&ipsec->netevent_nb);
		ipsec->netevent_nb.notifier_call = NULL;
	}
	if (ipsec->aso)
		mlx5e_ipsec_aso_cleanup(ipsec);
	destroy_workqueue(ipsec->wq);
	kfree(ipsec);
	priv->ipsec = NULL;
}

static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IPv4 options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}

	return true;
}

static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_work *work = sa_entry->work;
	struct mlx5e_ipsec_sa_entry *sa_entry_shadow;
	bool need_update;

	need_update = mlx5e_ipsec_update_esn_state(sa_entry);
	if (!need_update)
		return;

	sa_entry_shadow = work->data;
	memset(sa_entry_shadow, 0x00, sizeof(*sa_entry_shadow));
	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry_shadow->attrs);
	queue_work(sa_entry->ipsec->wq, &work->work);
}

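/* Fold hardware flow-counter deltas into the xfrm lifetime stats so
 * soft/hard limits account for traffic that never reached the CPU;
 * mlx5_fc_query_cached() returns the change since its previous call,
 * hence the += below.
 */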
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{
	struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	u64 packets, bytes, lastuse;

	lockdep_assert(lockdep_is_held(&x->lock) ||
		       lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));

	if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
		return;

	mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
	x->curlft.packets += packets;
	x->curlft.bytes += bytes;
}

static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
				      struct xfrm_policy *x,
				      struct netlink_ext_ack *extack)
{
	struct xfrm_selector *sel = &x->selector;

	if (x->type != XFRM_POLICY_TYPE_MAIN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
		return -EINVAL;
	}

	/* Note that only a single xfrm template is supported */
	if (x->xfrm_nr > 1) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
		return -EINVAL;
	}

	if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
	    x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
		return -EINVAL;
	}

	if (!x->xfrm_vec[0].reqid && sel->proto == IPPROTO_IP &&
	    addr6_all_zero(sel->saddr.a6) && addr6_all_zero(sel->daddr.a6)) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported policy with reqid 0 without at least one of upper protocol or ip addr(s) different than 0");
		return -EINVAL;
	}

	if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
		return -EINVAL;
	}

	if (x->selector.proto != IPPROTO_IP &&
	    x->selector.proto != IPPROTO_UDP &&
	    x->selector.proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than TCP/UDP");
		return -EINVAL;
	}

	if (x->priority) {
		if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support policy priority");
			return -EINVAL;
		}

		if (x->priority == U32_MAX) {
			NL_SET_ERR_MSG_MOD(extack, "Device does not support requested policy priority");
			return -EINVAL;
		}
	}

	if (x->xdo.type == XFRM_DEV_OFFLOAD_PACKET &&
	    !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
		NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
		return -EINVAL;
	}

	return 0;
}

static void
mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
				  struct mlx5_accel_pol_xfrm_attrs *attrs)
{
	struct xfrm_policy *x = pol_entry->x;
	struct xfrm_selector *sel;

	sel = &x->selector;
	memset(attrs, 0, sizeof(*attrs));

	memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
	memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
	attrs->family = sel->family;
	attrs->dir = x->xdo.dir;
	attrs->action = x->action;
	attrs->type = XFRM_DEV_OFFLOAD_PACKET;
	attrs->reqid = x->xfrm_vec[0].reqid;
	attrs->upspec.dport = ntohs(sel->dport);
	attrs->upspec.dport_mask = ntohs(sel->dport_mask);
	attrs->upspec.sport = ntohs(sel->sport);
	attrs->upspec.sport_mask = ntohs(sel->sport_mask);
	attrs->upspec.proto = sel->proto;
	attrs->prio = x->priority;
}

static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
				 struct netlink_ext_ack *extack)
{
	struct net_device *netdev = x->xdo.real_dev;
	struct mlx5e_ipsec_pol_entry *pol_entry;
	struct mlx5e_priv *priv;
	int err;

	priv = netdev_priv(netdev);
	if (!priv->ipsec) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
		return -EOPNOTSUPP;
	}

	err = mlx5e_xfrm_validate_policy(priv->mdev, x, extack);
	if (err)
		return err;

	pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
	if (!pol_entry)
		return -ENOMEM;

	pol_entry->x = x;
	pol_entry->ipsec = priv->ipsec;

	if (!mlx5_eswitch_block_ipsec(priv->mdev)) {
		err = -EBUSY;
		goto ipsec_busy;
	}

	mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
	err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
	if (err)
		goto err_fs;

	x->xdo.offload_handle = (unsigned long)pol_entry;
	return 0;

err_fs:
	mlx5_eswitch_unblock_ipsec(priv->mdev);
ipsec_busy:
	kfree(pol_entry);
	NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
	return err;
}

static void mlx5e_xfrm_del_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	mlx5e_accel_ipsec_fs_del_pol(pol_entry);
	mlx5_eswitch_unblock_ipsec(pol_entry->ipsec->mdev);
}

static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
{
	struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);

	kfree(pol_entry);
}

static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
	.xdo_dev_state_add = mlx5e_xfrm_add_state,
	.xdo_dev_state_delete = mlx5e_xfrm_del_state,
	.xdo_dev_state_free = mlx5e_xfrm_free_state,
	.xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,

	.xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
	.xdo_dev_policy_add = mlx5e_xfrm_add_policy,
	.xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
	.xdo_dev_policy_free = mlx5e_xfrm_free_policy,
};

void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct net_device *netdev = priv->netdev;

	if (!mlx5_ipsec_device_caps(mdev))
		return;

	mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");

	netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_enc_features |= NETIF_F_HW_ESP;

	if (!MLX5_CAP_ETH(mdev, swp_csum)) {
		mlx5_core_dbg(mdev, "mlx5e: SWP checksum not supported\n");
		return;
	}

	netdev->features |= NETIF_F_HW_ESP_TX_CSUM;
	netdev->hw_enc_features |= NETIF_F_HW_ESP_TX_CSUM;

	if (!MLX5_CAP_ETH(mdev, swp_lso)) {
		mlx5_core_dbg(mdev, "mlx5e: ESP LSO not supported\n");
		return;
	}

	netdev->gso_partial_features |= NETIF_F_GSO_ESP;
	mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
	netdev->features |= NETIF_F_GSO_ESP;
	netdev->hw_features |= NETIF_F_GSO_ESP;
	netdev->hw_enc_features |= NETIF_F_GSO_ESP;
}