// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
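/* kTLS RX device offload: per-connection setup/teardown of HW decryption
 * state (DEK, TLS-enabled TIR, flow-steering rule), and the device-driven
 * resync flow that keeps the HW record tracker in sync with the TCP stream.
 */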

#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

struct accel_rule {
	struct work_struct work;
	struct mlx5e_priv *priv;
	struct mlx5_flow_handle *rule;
};

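/* The progress params are read back from HW into a DMA buffer. The buffer
 * is padded to PROGRESS_PARAMS_WRITE_UNIT and cacheline-aligned below,
 * presumably so a full-unit device write cannot clobber adjacent memory and
 * the buffer does not share a cacheline with unrelated data.
 */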
#define PROGRESS_PARAMS_WRITE_UNIT	64
#define PROGRESS_PARAMS_PADDED_SIZE	\
		(ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \
		       PROGRESS_PARAMS_WRITE_UNIT))

struct mlx5e_ktls_rx_resync_buf {
	union {
		struct mlx5_wqe_tls_progress_params_seg progress;
		u8 pad[PROGRESS_PARAMS_PADDED_SIZE];
	} ____cacheline_aligned_in_smp;
	dma_addr_t dma_addr;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
};

enum {
	MLX5E_PRIV_RX_FLAG_DELETING,
	MLX5E_NUM_PRIV_RX_FLAGS,
};

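/* State of an in-flight resync request. @core embeds the TLS core's async
 * resync context. @refcnt holds one base reference plus one per outstanding
 * GET_PSV operation, so teardown can wait for their completions. @work
 * posts the GET_PSV WQE from process context.
 */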
struct mlx5e_ktls_rx_resync_ctx {
	struct tls_offload_resync_async core;
	struct work_struct work;
	struct mlx5e_priv *priv;
	refcount_t refcnt;
	__be64 sw_rcd_sn_be;
	u32 seq;
};

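/* Per-connection RX offload context. A pointer to it is kept in the TLS
 * driver context (see mlx5e_set_ktls_rx_priv_ctx()) and on the wqe_info of
 * posted params WQEs, which is why teardown must wait for add_ctx.
 */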
struct mlx5e_ktls_offload_context_rx {
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct mlx5e_rq_stats *stats;
	struct completion add_ctx;
	u32 tirn;
	u32 key_id;
	u32 rxq;
	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);

	/* resync */
	struct mlx5e_ktls_rx_resync_ctx resync;
};

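/* Create a dedicated TIR for the connection: it points at the RQT of the
 * socket's rxq (same as the direct TIR), with tls_en set so the device
 * decrypts the traffic that the flow-steering rule directs to it.
 */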
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
{
	int err, inlen;
	void *tirc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, tls_en, 1);
	MLX5_SET(tirc, tirc, self_lb_block,
		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST |
		 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST);

	err = mlx5_core_create_tir(mdev, in, tirn);

	kvfree(in);
	return err;
}

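/* Runs on the TLS RX workqueue: the rule is added only after the params
 * WQEs complete (NAPI context), and flow-steering rule insertion may sleep.
 * Completes add_ctx in all cases so the del flow can synchronize with it.
 */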
static void accel_rule_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct accel_rule *accel_rule;
	struct mlx5_flow_handle *rule;

	accel_rule = container_of(work, struct accel_rule, work);
	priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
				     priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG);
	if (!IS_ERR_OR_NULL(rule))
		accel_rule->rule = rule;
out:
	complete(&priv_rx->add_ctx);
}

static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv,
			    struct sock *sk)
{
	INIT_WORK(&rule->work, accel_rule_handle_work);
	rule->priv = priv;
}

static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
			  struct mlx5e_icosq_wqe_info *wi)
{
	sq->db.wqe_info[pi] = *wi;
}

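/* Post the params WQEs that program the connection's TIR: static params
 * carry the crypto material, progress params set the next expected record
 * TCP sequence. Both go on the channel's async ICOSQ; callers hold
 * async_icosq_lock.
 */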
static struct mlx5_wqe_ctrl_seg *
post_static_params(struct mlx5e_icosq *sq,
		   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs, room;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	room = mlx5e_stop_room_for_wqe(num_wqebbs);
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
				       priv_rx->tirn, priv_rx->key_id,
				       priv_rx->resync.seq, false,
				       TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static struct mlx5_wqe_ctrl_seg *
post_progress_params(struct mlx5e_icosq *sq,
		     struct mlx5e_ktls_offload_context_rx *priv_rx,
		     u32 next_record_tcp_sn)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs, room;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	room = mlx5e_stop_room_for_wqe(num_wqebbs);
	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, room)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_rx->tirn, false,
					 next_record_tcp_sn,
					 TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};

	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static int post_rx_param_wqes(struct mlx5e_channel *c,
			      struct mlx5e_ktls_offload_context_rx *priv_rx,
			      u32 next_record_tcp_sn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	err = 0;
	sq = &c->async_icosq;
	spin_lock(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg))
		goto err_out;
	cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
	if (IS_ERR(cseg))
		goto err_out;

	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
	spin_unlock(&c->async_icosq_lock);

	return err;

err_out:
	priv_rx->stats->tls_resync_req_skip++;
	err = PTR_ERR(cseg);
	complete(&priv_rx->add_ctx);
	goto unlock;
}

static void
mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
		     TLS_OFFLOAD_CONTEXT_SIZE_RX);

	*ctx = priv_rx;
}

static struct mlx5e_ktls_offload_context_rx *
mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	return *ctx;
}

/* Re-sync */
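/* The device requests a resync once it loses track of the record stream.
 * Flow:
 * 1. NAPI sees a resync-request CQE and calls resync_update_sn() to look up
 *    the socket and notify the TLS stack
 *    (tls_offload_rx_resync_async_request_start()).
 * 2. resync_handle_work() posts a GET_PSV WQE to read the HW progress
 *    params, i.e. the TCP sequence at which the device stopped tracking.
 * 3. The GET_PSV completion forwards that sequence to the TLS stack
 *    (tls_offload_rx_resync_async_request_end()).
 * 4. When the SW record sequence for it is known, the stack calls
 *    mlx5e_ktls_rx_resync(), which re-posts static params carrying the new
 *    record number (resync_handle_seq_match()).
 */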
/* Runs in work context */
static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
				struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	struct mlx5e_ktls_rx_resync_buf *buf;
	struct mlx5e_icosq_wqe_info wi;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_seg_get_psv *psv;
	struct device *pdev;
	int err;
	u16 pi;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(!buf)) {
		err = -ENOMEM;
		goto err_out;
	}

	pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
	buf->dma_addr = dma_map_single(pdev, &buf->progress,
				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
		err = -ENOMEM;
		goto err_free;
	}

	buf->priv_rx = priv_rx;

	BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);

	spin_lock(&sq->channel->async_icosq_lock);

	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
		spin_unlock(&sq->channel->async_icosq_lock);
		err = -ENOSPC;
		goto err_dma_unmap;
	}

	pi = mlx5e_icosq_get_next_pi(sq, 1);
	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);

#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))

	cseg = &wqe->ctrl;
	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV |
			    (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24));
	cseg->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);

	psv = &wqe->psv;
	psv->num_psv      = 1 << 4;
	psv->l_key        = sq->channel->mkey_be;
	psv->psv_index[0] = cpu_to_be32(priv_rx->tirn);
	psv->va           = cpu_to_be64(buf->dma_addr);

	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
		.num_wqebbs = 1,
		.tls_get_params.buf = buf,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc++;
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	spin_unlock(&sq->channel->async_icosq_lock);

	return 0;

err_dma_unmap:
	dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
err_free:
	kfree(buf);
err_out:
	priv_rx->stats->tls_resync_req_skip++;
	return err;
}

/* Function is called with elevated refcount.
 * It decreases it only if no WQE is posted.
 */
static void resync_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_channel *c;
	struct mlx5e_icosq *sq;

	resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		refcount_dec(&resync->refcnt);
		return;
	}

	c = resync->priv->channels.c[priv_rx->rxq];
	sq = &c->async_icosq;

	if (resync_post_get_progress_params(sq, priv_rx))
		refcount_dec(&resync->refcnt);
}

static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
			struct mlx5e_priv *priv)
{
	INIT_WORK(&resync->work, resync_handle_work);
	resync->priv = priv;
	refcount_set(&resync->refcnt, 1);
}

/* Function can be called with the refcount being either elevated or not.
 * It does not affect the refcount.
 */
static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
				   struct mlx5e_channel *c)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
	err = 0;

	sq = &c->async_icosq;
	spin_lock(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg)) {
		priv_rx->stats->tls_resync_res_skip++;
		err = PTR_ERR(cseg);
		goto unlock;
	}
	/* Do not increment priv_rx refcnt, CQE handling is empty */
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	priv_rx->stats->tls_resync_res_ok++;
unlock:
	spin_unlock(&c->async_icosq_lock);

	return err;
}

/* Function is called with elevated refcount, it decreases it. */
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
					  struct mlx5e_icosq *sq)
{
	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	u8 tracker_state, auth_state, *ctx;
	struct device *dev;
	u32 hw_seq;

	priv_rx = buf->priv_rx;
	resync = &priv_rx->resync;
	dev = mlx5_core_dma_dev(resync->priv->mdev);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
				DMA_FROM_DEVICE);

	ctx = buf->progress.ctx;
	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
		priv_rx->stats->tls_resync_req_skip++;
		goto out;
	}

	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
	priv_rx->stats->tls_resync_req_end++;
out:
	refcount_dec(&resync->refcnt);
	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
}

/* Runs in NAPI.
 * Function elevates the refcount, unless no work is queued.
 */
static bool resync_queue_get_psv(struct sock *sk)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return false;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		return false;

	resync = &priv_rx->resync;
	refcount_inc(&resync->refcnt);
	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
		refcount_dec(&resync->refcnt);

	return true;
}

/* Runs in NAPI */
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct net_device *netdev = rq->netdev;
	struct sock *sk = NULL;
	unsigned int datalen;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;
	int depth = 0;

	__vlan_get_protocol(skb, eth->h_proto, &depth);
	iph = (struct iphdr *)(skb->data + depth);

	if (iph->version == 4) {
		depth += sizeof(struct iphdr);
		th = (void *)iph + sizeof(struct iphdr);

		sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		depth += sizeof(struct ipv6hdr);
		th = (void *)ipv6h + sizeof(struct ipv6hdr);

		sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}

	depth += sizeof(struct tcphdr);

	if (unlikely(!sk))
		return;

	/* The established-socket lookup took a reference; on the early-exit
	 * paths below, drop it with sock_gen_put() (which also handles
	 * TIME_WAIT socks). On success, the reference is handed over to the
	 * skb destructor (sock_edemux).
	 */
	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		goto unref;

	if (unlikely(!resync_queue_get_psv(sk)))
		goto unref;

	skb->sk = sk;
	skb->destructor = sock_edemux;

	seq = th->seq;
	datalen = skb->len - depth;
	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
	rq->stats->tls_resync_req_start++;

	return;

unref:
	sock_gen_put(sk);
}

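/* Called from the TLS stack's resync path once it has resolved the record
 * sequence number that corresponds to TCP sequence @seq. Push the new
 * record number to HW via a static params WQE on the connection's channel.
 */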
void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
			  u32 seq, u8 *rcd_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;
	struct mlx5e_channel *c;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return;

	resync = &priv_rx->resync;
	resync->sw_rcd_sn_be = *(__be64 *)rcd_sn;
	resync->seq = seq;

	priv = netdev_priv(netdev);
	c = priv->channels.c[priv_rx->rxq];

	resync_handle_seq_match(priv_rx, c);
}

/* End of resync section */

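/* NAPI CQE handler: mark the skb as already decrypted by HW, or kick the
 * resync flow if the device requested one.
 */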
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		skb->decrypted = 1;
		stats->tls_decrypted_packets++;
		stats->tls_decrypted_bytes += *cqe_bcnt;
		break;
	case CQE_TLS_OFFLOAD_RESYNC:
		stats->tls_resync_req_pkt++;
		resync_update_sn(rq, skb);
		break;
	default: /* CQE_TLS_OFFLOAD_ERROR: */
		stats->tls_err++;
		break;
	}
}

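/* Completion of the params WQEs posted in the add flow. Now that HW holds
 * the crypto state, install the flow-steering rule from work context.
 */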
void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;
	struct accel_rule *rule = &priv_rx->rule;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		complete(&priv_rx->add_ctx);
		return;
	}
	queue_work(rule->priv->tls->rx_wq, &rule->work);
}

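/* Fall back to rxq 0 when the socket has no RX queue mapping yet. */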
static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
	int rxq = sk_rx_queue_get(sk);

	if (unlikely(rxq == -1))
		rxq = 0;

	return rxq;
}

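/* Install RX offload for a new connection: create a DEK from the crypto
 * material and a TLS-enabled TIR, then post the params WQEs. Their
 * completion queues the work that adds the flow-steering rule redirecting
 * the connection's 5-tuple to the new TIR.
 */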
int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int rxq, err;
	u32 rqtn;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
	if (unlikely(!priv_rx))
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
	if (err)
		goto err_create_key;

	priv_rx->crypto_info  =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;

	rxq = mlx5e_ktls_sk_get_rxq(sk);
	priv_rx->rxq = rxq;
	priv_rx->sk = sk;

	priv_rx->stats = &priv->channel_stats[rxq].rq;
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

	rqtn = priv->direct_tir[rxq].rqt.rqtn;

	err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn);
	if (err)
		goto err_create_tir;

	init_completion(&priv_rx->add_ctx);

	accel_rule_init(&priv_rx->rule, priv, sk);
	resync = &priv_rx->resync;
	resync_init(resync, priv);
	tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC);

	err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
	if (err)
		goto err_post_wqes;

	priv_rx->stats->tls_ctx++;

	return 0;

err_post_wqes:
	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
err_create_tir:
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
err_create_key:
	kfree(priv_rx);
	return err;
}

/* Elevated refcount on the resync object means there are
 * outstanding operations (uncompleted GET_PSV WQEs) that
 * will read the resync / priv_rx objects once completed.
 * Wait for them to avoid use-after-free.
 */
static void wait_for_resync(struct net_device *netdev,
			    struct mlx5e_ktls_rx_resync_ctx *resync)
{
#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */
	unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT);
	unsigned int refcnt;

	do {
		refcnt = refcount_read(&resync->refcnt);
		if (refcnt == 1)
			return;

		msleep(20);
	} while (time_before(jiffies, exp_time));

	netdev_warn(netdev,
		    "Failed waiting for kTLS RX resync refcnt to be released (%u).\n",
		    refcnt);
}

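/* Teardown order matters: mark DELETING and sync with NAPI so no new resync
 * work is queued, wait for the add flow to complete (priv_rx is referenced
 * from its wqe_info), drain outstanding resync operations, and only then
 * release HW resources and free priv_rx.
 */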
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
	synchronize_rcu(); /* Sync with NAPI */
	if (!cancel_work_sync(&priv_rx->rule.work))
		/* completion is needed, as the priv_rx in the add flow
		 * is maintained on the wqe info (wi), not on the socket.
		 */
		wait_for_completion(&priv_rx->add_ctx);
	resync = &priv_rx->resync;
	if (cancel_work_sync(&resync->work))
		refcount_dec(&resync->refcnt);
	wait_for_resync(netdev, resync);

	priv_rx->stats->tls_del++;
	if (priv_rx->rule.rule)
		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

	mlx5_core_destroy_tir(mdev, priv_rx->tirn);
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
	kfree(priv_rx);
}