/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include "accel/ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "accel/accel.h"
#include "en.h"

enum {
	MLX5E_IPSEC_RX_SYNDROME_DECRYPTED = 0x11,
	MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED = 0x12,
	MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO = 0x17,
};

struct mlx5e_ipsec_rx_metadata {
	unsigned char   nexthdr;
	__be32		sa_handle;
} __packed;

enum {
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD = 0x8,
	MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP = 0x9,
};

struct mlx5e_ipsec_tx_metadata {
	__be16 mss_inv;         /* 1/MSS in 16bit fixed point, only for LSO */
	__be16 seq;             /* LSBs of the first TCP seq, only for LSO */
	u8     esp_next_proto;  /* Next protocol of ESP */
} __packed;

struct mlx5e_ipsec_metadata {
	unsigned char syndrome;
	union {
		unsigned char raw[5];
		/* from FPGA to host, on successful decrypt */
		struct mlx5e_ipsec_rx_metadata rx;
		/* from host to FPGA */
		struct mlx5e_ipsec_tx_metadata tx;
	} __packed content;
	/* packet type ID field	*/
	__be16 ethertype;
} __packed;

#define MAX_LSO_MSS 2048

/* Pre-calculated (Q0.16) fixed-point inverse 1/x function */
static __be16 mlx5e_ipsec_inverse_table[MAX_LSO_MSS];

static inline __be16 mlx5e_ipsec_mss_inv(struct sk_buff *skb)
{
	return mlx5e_ipsec_inverse_table[skb_shinfo(skb)->gso_size];
}

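/* Prepend the in-band FPGA metadata header: make room for it behind the MAC
 * addresses, retag the frame with MLX5E_METADATA_ETHER_TYPE and zero the
 * metadata payload.  The original ethertype ends up in mdata->ethertype.
 */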
static struct mlx5e_ipsec_metadata *mlx5e_ipsec_add_metadata(struct sk_buff *skb)
{
	struct mlx5e_ipsec_metadata *mdata;
	struct ethhdr *eth;

	if (unlikely(skb_cow_head(skb, sizeof(*mdata))))
		return ERR_PTR(-ENOMEM);

	eth = (struct ethhdr *)skb_push(skb, sizeof(*mdata));
	skb->mac_header -= sizeof(*mdata);
	mdata = (struct mlx5e_ipsec_metadata *)(eth + 1);

	memmove(skb->data, skb->data + sizeof(*mdata),
		2 * ETH_ALEN);

	eth->h_proto = cpu_to_be16(MLX5E_METADATA_ETHER_TYPE);

	memset(mdata->content.raw, 0, sizeof(mdata->content.raw));
	return mdata;
}

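/* Remove the ESP trailer (padding, pad length, next header and ICV) from the
 * skb and shrink the IPv4 total length / IPv6 payload length to match; the
 * pad length is taken from the byte preceding the next-header field.
 */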
static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
{
	unsigned int alen = crypto_aead_authsize(x->data);
	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
	struct iphdr *ipv4hdr = ip_hdr(skb);
	unsigned int trailer_len;
	u8 plen;
	int ret;

	ret = skb_copy_bits(skb, skb->len - alen - 2, &plen, 1);
	if (unlikely(ret))
		return ret;

	trailer_len = alen + plen + 2;

	pskb_trim(skb, skb->len - trailer_len);
	if (skb->protocol == htons(ETH_P_IP)) {
		ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
		ip_send_check(ipv4hdr);
	} else {
		ipv6hdr->payload_len = htons(ntohs(ipv6hdr->payload_len) -
					     trailer_len);
	}
	return 0;
}

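/* Program the software parser (SWP) offsets and flags in the WQE Ethernet
 * segment so hardware can locate the outer and inner L3/L4 headers for the
 * tunnel/transport layouts illustrated below.
 */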
static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
				struct mlx5_wqe_eth_seg *eseg, u8 mode,
				struct xfrm_offload *xo)
{
	/* Tunnel Mode:
	 * SWP:      OutL3       InL3  InL4
	 * Pkt: MAC  IP     ESP  IP    L4
	 *
	 * Transport Mode:
	 * SWP:      OutL3       InL4
	 *           InL3
	 * Pkt: MAC  IP     ESP  L4
	 *
	 * Tunnel(VXLAN TCP/UDP) over Transport Mode
	 * SWP:      OutL3                   InL3  InL4
	 * Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
	 */

	/* Shared settings */
	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
	if (skb->protocol == htons(ETH_P_IPV6))
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;

	/* Tunnel mode */
	if (mode == XFRM_MODE_TUNNEL) {
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		if (xo->proto == IPPROTO_IPV6)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
		if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		return;
	}

	/* Transport mode */
	if (mode != XFRM_MODE_TRANSPORT)
		return;

	if (!xo->inner_ipproto) {
		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
		if (skb->protocol == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
		if (xo->proto == IPPROTO_UDP)
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		return;
	}

	/* Tunnel(VXLAN TCP/UDP) over Transport Mode */
	switch (xo->inner_ipproto) {
	case IPPROTO_UDP:
		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
		fallthrough;
	case IPPROTO_TCP:
		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
		eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
		if (skb->protocol == htons(ETH_P_IPV6))
			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
		break;
	default:
		break;
	}
}

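/* Write the 64-bit ESN into the ESP IV field.  When a GSO burst wraps the
 * low 32 bits of the sequence space, the leading segments still belong to
 * the previous high-order epoch, so seq.hi is decremented for the stored
 * value.
 */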
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
			    struct xfrm_offload *xo)
{
	struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
	__u32 oseq = replay_esn->oseq;
	int iv_offset;
	__be64 seqno;
	u32 seq_hi;

	if (unlikely(skb_is_gso(skb) && oseq < MLX5E_IPSEC_ESN_SCOPE_MID &&
		     MLX5E_IPSEC_ESN_SCOPE_MID < (oseq - skb_shinfo(skb)->gso_segs))) {
		seq_hi = xo->seq.hi - 1;
	} else {
		seq_hi = xo->seq.hi;
	}

	/* Place the SN in the IV field */
	seqno = cpu_to_be64(xo->seq.low + ((u64)seq_hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, 8);
}

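/* Write the 64-bit sequence number into the ESP IV field (non-ESN case). */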
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
			struct xfrm_offload *xo)
{
	int iv_offset;
	__be64 seqno;

	/* Place the SN in the IV field */
	seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
	iv_offset = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr);
	skb_store_bits(skb, iv_offset, &seqno, 8);
}

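/* Fill the TX metadata: for GSO packets report the LSO syndrome along with
 * 1/MSS and the low 16 bits of the first TCP sequence number, otherwise the
 * plain offload syndrome; the ESP next protocol is always carried.
 */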
static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
				     struct mlx5e_ipsec_metadata *mdata,
				     struct xfrm_offload *xo)
{
	struct ip_esp_hdr *esph;
	struct tcphdr *tcph;

	if (skb_is_gso(skb)) {
		/* Add LSO metadata indication */
		esph = ip_esp_hdr(skb);
		tcph = inner_tcp_hdr(skb);
		netdev_dbg(skb->dev, "   Offloading GSO packet outer L3 %u; L4 %u; Inner L3 %u; L4 %u\n",
			   skb->network_header,
			   skb->transport_header,
			   skb->inner_network_header,
			   skb->inner_transport_header);
		netdev_dbg(skb->dev, "   Offloading GSO packet of len %u; mss %u; TCP sp %u dp %u seq 0x%x ESP seq 0x%x\n",
			   skb->len, skb_shinfo(skb)->gso_size,
			   ntohs(tcph->source), ntohs(tcph->dest),
			   ntohl(tcph->seq), ntohl(esph->seq_no));
		mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD_WITH_LSO_TCP;
		mdata->content.tx.mss_inv = mlx5e_ipsec_mss_inv(skb);
		mdata->content.tx.seq = htons(ntohl(tcph->seq) & 0xFFFF);
	} else {
		mdata->syndrome = MLX5E_IPSEC_TX_SYNDROME_OFFLOAD;
	}
	mdata->content.tx.esp_next_proto = xo->proto;

	netdev_dbg(skb->dev, "   TX metadata syndrome %u proto %u mss_inv %04x seq %04x\n",
		   mdata->syndrome, mdata->content.tx.esp_next_proto,
		   ntohs(mdata->content.tx.mss_inv),
		   ntohs(mdata->content.tx.seq));
}

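/* Emit the ESP trailer (padding, pad length and next header) as an inline
 * WQE data segment; the inline byte count (ipsec_st->tailen) also accounts
 * for the ICV that follows.
 */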
void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st,
			       struct mlx5_wqe_inline_seg *inlseg)
{
	inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
	esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
}

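/* Record the xfrm state and offload context for this packet and, when
 * mlx5_is_ipsec_device() is true, precompute the ESP pad length (plen) and
 * total trailer length (tailen) used when building the WQE.
 */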
static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
				 struct sk_buff *skb,
				 struct xfrm_state *x,
				 struct xfrm_offload *xo,
				 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
	unsigned int blksize, clen, alen, plen;
	struct crypto_aead *aead;
	unsigned int tailen;

	ipsec_st->x = x;
	ipsec_st->xo = xo;
	if (mlx5_is_ipsec_device(priv->mdev)) {
		aead = x->data;
		alen = crypto_aead_authsize(aead);
		blksize = ALIGN(crypto_aead_blocksize(aead), 4);
		clen = ALIGN(skb->len + 2, blksize);
		plen = max_t(u32, clen - skb->len, 4);
		tailen = plen + alen;
		ipsec_st->plen = plen;
		ipsec_st->tailen = tailen;
	}

	return 0;
}

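/* Build the WQE Ethernet segment for an offloaded ESP packet: set the SWP
 * offsets and, on IPsec offload devices, mark the packet for the IPsec
 * flow-table metadata and request hardware trailer insertion, picking the
 * trailer association from the L3 protocol and any ESP-in-UDP encapsulation.
 */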
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
			       struct mlx5_wqe_eth_seg *eseg)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct xfrm_encap_tmpl  *encap;
	struct xfrm_state *x;
	struct sec_path *sp;
	u8 l3_proto;

	sp = skb_sec_path(skb);
	if (unlikely(sp->len != 1))
		return;

	x = xfrm_input_state(skb);
	if (unlikely(!x))
		return;

	if (unlikely(!x->xso.offload_handle ||
		     (skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6))))
		return;

	mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);

	l3_proto = (x->props.family == AF_INET) ?
		   ((struct iphdr *)skb_network_header(skb))->protocol :
		   ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;

	if (mlx5_is_ipsec_device(priv->mdev)) {
		eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
		eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
		encap = x->encap;
		if (!encap) {
			eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
		} else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
			eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
		}
	}
}

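/* TX datapath hook for IPsec offload.  Validates the sec_path and xfrm
 * state, strips the software ESP trailer for non-GSO packets, adds and
 * fills the FPGA metadata header when applicable, sets the IV and records
 * the per-packet offload state.  Returns false and frees the skb on error.
 */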
bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
			       struct sk_buff *skb,
			       struct mlx5e_accel_tx_ipsec_state *ipsec_st)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5e_ipsec_metadata *mdata;
	struct xfrm_state *x;
	struct sec_path *sp;

	sp = skb_sec_path(skb);
	if (unlikely(sp->len != 1)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
		goto drop;
	}

	x = xfrm_input_state(skb);
	if (unlikely(!x)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_no_state);
		goto drop;
	}

	if (unlikely(!x->xso.offload_handle ||
		     (skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6)))) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_not_ip);
		goto drop;
	}

	if (!skb_is_gso(skb))
		if (unlikely(mlx5e_ipsec_remove_trailer(skb, x))) {
			atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
			goto drop;
		}

	if (MLX5_CAP_GEN(priv->mdev, fpga)) {
		mdata = mlx5e_ipsec_add_metadata(skb);
		if (IS_ERR(mdata)) {
			atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
			goto drop;
		}
	}

	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
	sa_entry->set_iv_op(skb, x, xo);
	if (MLX5_CAP_GEN(priv->mdev, fpga))
		mlx5e_ipsec_set_metadata(skb, mdata, xo);

	mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);

	return true;

drop:
	kfree_skb(skb);
	return false;
}

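/* Reconstruct the skb sec_path from the RX metadata header: look up the
 * xfrm state by SA handle, attach it to the secpath and translate the
 * hardware syndrome into an xfrm offload status.
 */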
static inline struct xfrm_state *
mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb,
		     struct mlx5e_ipsec_metadata *mdata)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct xfrm_offload *xo;
	struct xfrm_state *xs;
	struct sec_path *sp;
	u32 sa_handle;

	sp = secpath_set(skb);
	if (unlikely(!sp)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
		return NULL;
	}

	sa_handle = be32_to_cpu(mdata->content.rx.sa_handle);
	xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
	if (unlikely(!xs)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
		return NULL;
	}

	sp = skb_sec_path(skb);
	sp->xvec[sp->len++] = xs;
	sp->olen++;

	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	switch (mdata->syndrome) {
	case MLX5E_IPSEC_RX_SYNDROME_DECRYPTED:
		xo->status = CRYPTO_SUCCESS;
		if (likely(priv->ipsec->no_trailer)) {
			xo->flags |= XFRM_ESP_NO_TRAILER;
			xo->proto = mdata->content.rx.nexthdr;
		}
		break;
	case MLX5E_IPSEC_RX_SYNDROME_AUTH_FAILED:
		xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
		break;
	case MLX5E_IPSEC_RX_SYNDROME_BAD_PROTO:
		xo->status = CRYPTO_INVALID_PROTOCOL;
		break;
	default:
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
		return NULL;
	}
	return xs;
}

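/* RX datapath hook for FPGA IPsec: if the frame carries a metadata header,
 * use it to attach the xfrm state and offload status, then strip the
 * metadata and shrink the CQE byte count accordingly.  Frees the skb when
 * no matching state is found.
 */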
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
					  struct sk_buff *skb, u32 *cqe_bcnt)
{
	struct mlx5e_ipsec_metadata *mdata;
	struct xfrm_state *xs;

	if (!is_metadata_hdr_valid(skb))
		return skb;

	/* Use the metadata */
	mdata = (struct mlx5e_ipsec_metadata *)(skb->data + ETH_HLEN);
	xs = mlx5e_ipsec_build_sp(netdev, skb, mdata);
	if (unlikely(!xs)) {
		kfree_skb(skb);
		return NULL;
	}

	remove_metadata_hdr(skb);
	*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;

	return skb;
}

enum {
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};

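/* RX handler for devices that report IPsec results through the CQE
 * flow-table metadata rather than an in-band header: recover the SA handle
 * and syndrome from the CQE, attach the xfrm state and set the offload
 * status.
 */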
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
				       struct sk_buff *skb,
				       struct mlx5_cqe64 *cqe)
{
	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
	struct mlx5e_priv *priv;
	struct xfrm_offload *xo;
	struct xfrm_state *xs;
	struct sec_path *sp;
	u32  sa_handle;

	sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
	priv = netdev_priv(netdev);
	sp = secpath_set(skb);
	if (unlikely(!sp)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
		return;
	}

	xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
	if (unlikely(!xs)) {
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
		return;
	}

	sp = skb_sec_path(skb);
	sp->xvec[sp->len++] = xs;
	sp->olen++;

	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;

	switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
		xo->status = CRYPTO_SUCCESS;
		if (WARN_ON_ONCE(priv->ipsec->no_trailer))
			xo->flags |= XFRM_ESP_NO_TRAILER;
		break;
	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
		xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
		break;
	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
		xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
		break;
	default:
		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
	}
}

void mlx5e_ipsec_build_inverse_table(void)
{
	u16 mss_inv;
	u32 mss;

	/* Calculate 1/x inverse table for use in GSO data path.
	 * Using this table, we provide the IPSec accelerator with the value of
	 * 1/gso_size so that it can infer the position of each segment inside
	 * the GSO, increment the ESP sequence number, and generate the IV.
	 * The HW needs this value in Q0.16 fixed-point number format.
	 */
	mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
	for (mss = 2; mss < MAX_LSO_MSS; mss++) {
		mss_inv = div_u64(1ULL << 32, mss) >> 16;
		mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
	}
}