// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}

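/* Reserve SQ room for the worst-case kTLS TX flow around a single SKB:
 * a SET_STATIC_PARAMS WQE, a SET_PROGRESS_PARAMS WQE, one DUMP WQE per
 * resync fragment/MTU chunk of a maximal record, and a trailing fence NOP.
 */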
u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	u16 num_dumps, stop_room = 0;

	if (!mlx5e_is_ktls_tx(mdev))
		return 0;

	num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(mdev, MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(mdev, MLX5E_KTLS_DUMP_WQEBBS);
	stop_room += 1; /* fence nop */

	return stop_room;
}

static void mlx5e_ktls_set_tisc(struct mlx5_core_dev *mdev, void *tisc)
{
	MLX5_SET(tisc, tisc, tls_en, 1);
	MLX5_SET(tisc, tisc, pd, mdev->mlx5e_res.hw_objs.pdn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn);
}

static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));

	return mlx5_core_create_tis(mdev, in, tisn);
}

static int mlx5e_ktls_create_tis_cb(struct mlx5_core_dev *mdev,
				    struct mlx5_async_ctx *async_ctx,
				    u32 *out, int outlen,
				    mlx5_async_cbk_t callback,
				    struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};

	mlx5e_ktls_set_tisc(mdev, MLX5_ADDR_OF(create_tis_in, in, ctx));
	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

static int mlx5e_ktls_destroy_tis_cb(struct mlx5_core_dev *mdev, u32 tisn,
				     struct mlx5_async_ctx *async_ctx,
				     u32 *out, int outlen,
				     mlx5_async_cbk_t callback,
				     struct mlx5_async_work *context)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);

	return mlx5_cmd_exec_cb(async_ctx, in, sizeof(in),
				out, outlen, callback, context);
}

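/* Per-connection TX offload state, stored in the socket's TLS driver context.
 * The fast-path fields are consulted on every transmitted SKB; the remaining
 * fields are used on connection setup/teardown and during resync.
 */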
struct mlx5e_ktls_offload_context_tx {
	/* fast path */
	u32 expected_seq;
	u32 tisn;
	bool ctx_post_pending;
	/* control / resync */
	struct list_head list_node; /* member of the pool */
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	struct tls_offload_context_tx *tx_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	u32 key_id;
	u8 create_err : 1;
};

static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);

	*ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}

/* struct for callback API management */
struct mlx5e_async_ctx {
	struct mlx5_async_work context;
	struct mlx5_async_ctx async_ctx;
	struct work_struct work;
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct completion complete;
	int err;
	union {
		u32 out_create[MLX5_ST_SZ_DW(create_tis_out)];
		u32 out_destroy[MLX5_ST_SZ_DW(destroy_tis_out)];
	};
};

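/* Allocate and initialize @n async command contexts. They are used to issue
 * TIS create/destroy commands in parallel and to wait for their completions.
 */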
static struct mlx5e_async_ctx *mlx5e_bulk_async_init(struct mlx5_core_dev *mdev, int n)
{
	struct mlx5e_async_ctx *bulk_async;
	int i;

	bulk_async = kvcalloc(n, sizeof(struct mlx5e_async_ctx), GFP_KERNEL);
	if (!bulk_async)
		return NULL;

	for (i = 0; i < n; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		mlx5_cmd_init_async_ctx(mdev, &async->async_ctx);
		init_completion(&async->complete);
	}

	return bulk_async;
}

static void mlx5e_bulk_async_cleanup(struct mlx5e_async_ctx *bulk_async, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		mlx5_cmd_cleanup_async_ctx(&async->async_ctx);
	}
	kvfree(bulk_async);
}

static void create_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	if (status) {
		async->err = status;
		priv_tx->create_err = 1;
		goto out;
	}

	priv_tx->tisn = MLX5_GET(create_tis_out, async->out_create, tisn);
out:
	complete(&async->complete);
}

static void destroy_tis_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5e_async_ctx *async =
		container_of(context, struct mlx5e_async_ctx, context);
	struct mlx5e_ktls_offload_context_tx *priv_tx = async->priv_tx;

	complete(&async->complete);
	kfree(priv_tx);
}

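/* Allocate a TX offload context and create its TIS. With @async == NULL the
 * TIS is created synchronously (blocking the caller); otherwise the command
 * is issued through the async API and completion is signaled from
 * create_tis_callback().
 */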
static struct mlx5e_ktls_offload_context_tx *
mlx5e_tls_priv_tx_init(struct mlx5_core_dev *mdev, struct mlx5e_tls_sw_stats *sw_stats,
		       struct mlx5e_async_ctx *async)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	int err;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return ERR_PTR(-ENOMEM);

	priv_tx->mdev = mdev;
	priv_tx->sw_stats = sw_stats;

	if (!async) {
		err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
		if (err)
			goto err_out;
	} else {
		async->priv_tx = priv_tx;
		err = mlx5e_ktls_create_tis_cb(mdev, &async->async_ctx,
					       async->out_create, sizeof(async->out_create),
					       create_tis_callback, &async->context);
		if (err)
			goto err_out;
	}

	return priv_tx;

err_out:
	kfree(priv_tx);
	return ERR_PTR(err);
}

static void mlx5e_tls_priv_tx_cleanup(struct mlx5e_ktls_offload_context_tx *priv_tx,
				      struct mlx5e_async_ctx *async)
{
	if (priv_tx->create_err) {
		complete(&async->complete);
		kfree(priv_tx);
		return;
	}
	async->priv_tx = priv_tx;
	mlx5e_ktls_destroy_tis_cb(priv_tx->mdev, priv_tx->tisn,
				  &async->async_ctx,
				  async->out_destroy, sizeof(async->out_destroy),
				  destroy_tis_callback, &async->context);
}

static void mlx5e_tls_priv_tx_list_cleanup(struct mlx5_core_dev *mdev,
					   struct list_head *list, int size)
{
	struct mlx5e_ktls_offload_context_tx *obj, *n;
	struct mlx5e_async_ctx *bulk_async;
	int i;

	bulk_async = mlx5e_bulk_async_init(mdev, size);
	if (!bulk_async)
		return;

	i = 0;
	list_for_each_entry_safe(obj, n, list, list_node) {
		mlx5e_tls_priv_tx_cleanup(obj, &bulk_async[i]);
		i++;
	}

	for (i = 0; i < size; i++) {
		struct mlx5e_async_ctx *async = &bulk_async[i];

		wait_for_completion(&async->complete);
	}
	mlx5e_bulk_async_cleanup(bulk_async, size);
}

/* Recycling pool API */

#define MLX5E_TLS_TX_POOL_BULK (16)
#define MLX5E_TLS_TX_POOL_HIGH (4 * 1024)
#define MLX5E_TLS_TX_POOL_LOW (MLX5E_TLS_TX_POOL_HIGH / 4)
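
/* The pool grows and shrinks in batches of MLX5E_TLS_TX_POOL_BULK objects via
 * the create/destroy works: a refill is triggered when the pool drops to
 * MLX5E_TLS_TX_POOL_LOW (or runs empty), shrinking when it reaches
 * MLX5E_TLS_TX_POOL_HIGH.
 */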

struct mlx5e_tls_tx_pool {
	struct mlx5_core_dev *mdev;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct mutex lock; /* Protects access to the pool */
	struct list_head list;
	size_t size;

	struct workqueue_struct *wq;
	struct work_struct create_work;
	struct work_struct destroy_work;
};

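/* Create a bulk of TX contexts with asynchronous TIS commands and splice them
 * into the pool. The batch is dropped if it would push the pool to the high
 * watermark; the work re-queues itself while the pool is still at or below
 * the low watermark.
 */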
static void create_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, create_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	struct mlx5e_async_ctx *bulk_async;
	LIST_HEAD(local_list);
	int i, j, err = 0;

	bulk_async = mlx5e_bulk_async_init(pool->mdev, MLX5E_TLS_TX_POOL_BULK);
	if (!bulk_async)
		return;

	for (i = 0; i < MLX5E_TLS_TX_POOL_BULK; i++) {
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, &bulk_async[i]);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}
		list_add(&obj->list_node, &local_list);
	}

	for (j = 0; j < i; j++) {
		struct mlx5e_async_ctx *async = &bulk_async[j];

		wait_for_completion(&async->complete);
		if (!err && async->err)
			err = async->err;
	}
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_alloc);
	mlx5e_bulk_async_cleanup(bulk_async, MLX5E_TLS_TX_POOL_BULK);
	if (err)
		goto err_out;

	mutex_lock(&pool->lock);
	if (pool->size + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		goto err_out;
	}
	list_splice(&local_list, &pool->list);
	pool->size += MLX5E_TLS_TX_POOL_BULK;
	if (pool->size <= MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);
	return;

err_out:
	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, i);
	atomic64_add(i, &pool->sw_stats->tx_tls_pool_free);
}

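/* Detach a bulk of TX contexts from the pool and release them, re-queueing
 * itself while the pool remains at or above the high watermark.
 */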
static void destroy_work(struct work_struct *work)
{
	struct mlx5e_tls_tx_pool *pool =
		container_of(work, struct mlx5e_tls_tx_pool, destroy_work);
	struct mlx5e_ktls_offload_context_tx *obj;
	LIST_HEAD(local_list);
	int i = 0;

	mutex_lock(&pool->lock);
	if (pool->size < MLX5E_TLS_TX_POOL_HIGH) {
		mutex_unlock(&pool->lock);
		return;
	}

	list_for_each_entry(obj, &pool->list, list_node)
		if (++i == MLX5E_TLS_TX_POOL_BULK)
			break;

	list_cut_position(&local_list, &pool->list, &obj->list_node);
	pool->size -= MLX5E_TLS_TX_POOL_BULK;
	if (pool->size >= MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, work);
	mutex_unlock(&pool->lock);

	mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
	atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
}

static struct mlx5e_tls_tx_pool *mlx5e_tls_tx_pool_init(struct mlx5_core_dev *mdev,
							struct mlx5e_tls_sw_stats *sw_stats)
{
	struct mlx5e_tls_tx_pool *pool;

	BUILD_BUG_ON(MLX5E_TLS_TX_POOL_LOW + MLX5E_TLS_TX_POOL_BULK >= MLX5E_TLS_TX_POOL_HIGH);

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->wq = create_singlethread_workqueue("mlx5e_tls_tx_pool");
	if (!pool->wq)
		goto err_free;

	INIT_LIST_HEAD(&pool->list);
	mutex_init(&pool->lock);

	INIT_WORK(&pool->create_work, create_work);
	INIT_WORK(&pool->destroy_work, destroy_work);

	pool->mdev = mdev;
	pool->sw_stats = sw_stats;

	return pool;

err_free:
	kvfree(pool);
	return NULL;
}

static void mlx5e_tls_tx_pool_list_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	while (pool->size > MLX5E_TLS_TX_POOL_BULK) {
		struct mlx5e_ktls_offload_context_tx *obj;
		LIST_HEAD(local_list);
		int i = 0;

		list_for_each_entry(obj, &pool->list, list_node)
			if (++i == MLX5E_TLS_TX_POOL_BULK)
				break;

		list_cut_position(&local_list, &pool->list, &obj->list_node);
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &local_list, MLX5E_TLS_TX_POOL_BULK);
		atomic64_add(MLX5E_TLS_TX_POOL_BULK, &pool->sw_stats->tx_tls_pool_free);
		pool->size -= MLX5E_TLS_TX_POOL_BULK;
	}
	if (pool->size) {
		mlx5e_tls_priv_tx_list_cleanup(pool->mdev, &pool->list, pool->size);
		atomic64_add(pool->size, &pool->sw_stats->tx_tls_pool_free);
	}
}

static void mlx5e_tls_tx_pool_cleanup(struct mlx5e_tls_tx_pool *pool)
{
	mlx5e_tls_tx_pool_list_cleanup(pool);
	destroy_workqueue(pool->wq);
	kvfree(pool);
}

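/* pool_push()/pool_pop() recycle TX contexts across connections on kTLS
 * connection add/del, kicking the background works when the high/low
 * watermarks are crossed. An empty pool falls back to a blocking allocation.
 */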
static void pool_push(struct mlx5e_tls_tx_pool *pool, struct mlx5e_ktls_offload_context_tx *obj)
{
	mutex_lock(&pool->lock);
	list_add(&obj->list_node, &pool->list);
	if (++pool->size == MLX5E_TLS_TX_POOL_HIGH)
		queue_work(pool->wq, &pool->destroy_work);
	mutex_unlock(&pool->lock);
}

static struct mlx5e_ktls_offload_context_tx *pool_pop(struct mlx5e_tls_tx_pool *pool)
{
	struct mlx5e_ktls_offload_context_tx *obj;

	mutex_lock(&pool->lock);
	if (unlikely(pool->size == 0)) {
		/* pool is empty:
		 * - trigger the populating work, and
		 * - serve the current context via the regular blocking api.
		 */
		queue_work(pool->wq, &pool->create_work);
		mutex_unlock(&pool->lock);
		obj = mlx5e_tls_priv_tx_init(pool->mdev, pool->sw_stats, NULL);
		if (!IS_ERR(obj))
			atomic64_inc(&pool->sw_stats->tx_tls_pool_alloc);
		return obj;
	}

	obj = list_first_entry(&pool->list, struct mlx5e_ktls_offload_context_tx,
			       list_node);
	list_del(&obj->list_node);
	if (--pool->size == MLX5E_TLS_TX_POOL_LOW)
		queue_work(pool->wq, &pool->create_work);
	mutex_unlock(&pool->lock);
	return obj;
}

/* End of pool API */

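/* Set up TX offload for a new kTLS socket: take a recycled (or freshly
 * created) TX context from the pool, program the crypto key into the device,
 * and record the initial TCP sequence number. The static/progress params
 * WQEs themselves are posted lazily from the TX path (ctx_post_pending).
 */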
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct tls_context *tls_ctx;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	priv_tx = pool_pop(pool);
	if (IS_ERR(priv_tx))
		return PTR_ERR(priv_tx);

	err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
	if (err)
		goto err_create_key;

	priv_tx->expected_seq = start_offload_tcp_sn;
	priv_tx->crypto_info  =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	priv_tx->ctx_post_pending = true;
	atomic64_inc(&priv_tx->sw_stats->tx_tls_ctx);

	return 0;

err_create_key:
	pool_push(pool, priv_tx);
	return err;
}

void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_tls_tx_pool *pool;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	pool = priv->tls->tx_pool;

	atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
	mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
	pool_push(pool, priv_tx);
}

static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes  = num_bytes,
		.resync_dump_frag_page = page,
	};
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

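/* The device is programmed per connection via two control WQEs on the SQ:
 * SET_STATIC_PARAMS carries the crypto material (crypto_info, key id) and the
 * TIS number, while SET_PROGRESS_PARAMS carries the record/sequence tracking
 * state. Both consume WQEBBs but describe no packet data.
 */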
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn, priv_tx->key_id, 0, fence,
				       TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

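/* Post the connection's param WQEs, followed by a fence NOP. Fencing enforces
 * ordering against previously posted WQEs: either the first param WQE is
 * fenced (fence_first_post), or the progress params WQE is fenced so it does
 * not overtake the static params / earlier work.
 */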
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
	tx_post_fence_nop(sq);
}

struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

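/* Look up the TLS record covering @tcp_seq and collect the frags holding the
 * record payload that precedes it (sync_len bytes), taking a page reference
 * on each frag so it can later be DUMPed to the device.
 */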
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

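/* Post a single DUMP WQE pointing at one resync fragment. DUMP WQEs replay
 * the record bytes that precede the out-of-order SKB through the TIS so the
 * device can rebuild its crypto state for the record.
 */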
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num      = cpu_to_be32(tisn << 8);

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

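/* TX resync: the SKB's TCP sequence does not match the device's expected
 * sequence (e.g. a retransmission). Re-post the params with the record number
 * of the record containing @seq, then DUMP the part of that record preceding
 * the SKB, splitting each frag into chunks of at most the SQ's HW MTU.
 */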
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE))
		/* We might get here with ret == FAIL if a retransmission
		 * reaches the driver after the relevant record is acked.
		 * It should be safe to drop the packet in this case
		 */
		return ret;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	for (i = 0; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

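/* TX datapath hook, called for SKBs of kTLS-offloaded sockets before their
 * send WQEs are built. Posts the pending param WQEs on first use, handles
 * out-of-order sequences via resync, and selects the connection's TIS for the
 * transmit WQE (state->tls_tisn). Returns false to drop the SKB.
 */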
bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	struct net_device *tls_netdev;
	struct tls_context *tls_ctx;
	int datalen;
	u32 seq;

	datalen = skb->len - skb_tcp_all_headers(skb);
	if (!datalen)
		return true;

	mlx5e_tx_mpwqe_ensure_complete(sq);

	tls_ctx = tls_get_ctx(skb->sk);
	tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
	/* Don't WARN on NULL: if tls_device_down is running in parallel,
	 * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
	 * true. Rather continue processing this packet.
	 */
	if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
		goto err_out;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx)))
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		stats->tls_ooo++;

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			stats->tls_skip_no_sync_data++;
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			goto err_out;
		case MLX5E_KTLS_SYNC_FAIL:
			stats->tls_drop_no_sync_data++;
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}

int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		return 0;

	priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
	if (!priv->tls->tx_pool)
		return -ENOMEM;

	return 0;
}

void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
{
	if (!mlx5e_is_ktls_tx(priv->mdev))
		return;

	mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
	priv->tls->tx_pool = NULL;
}