// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"

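/* A DUMP WQE is a single control segment plus a single data segment,
 * pointing at one fragment of previously transmitted record data.
 * During TX resync (below), DUMP WQEs replay the record bytes that
 * precede the out-of-order packet so the device can rebuild its
 * crypto state; the replayed bytes are not sent on the wire.
 */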
struct mlx5e_dump_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_data_seg data;
};

#define MLX5E_KTLS_DUMP_WQEBBS \
	(DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))

static u8
mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
			  unsigned int sync_len)
{
	/* Given the MTU and sync_len, calculates an upper bound for the
	 * number of DUMP WQEs needed for the TX resync of a record.
	 */
	return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}

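/* Reserve enough stop room in the SQ for the worst-case resync of a
 * single record: one static params WQE, one progress params WQE, and
 * a DUMP WQE for every fragment the record payload may span.
 */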
u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params)
{
	u16 num_dumps, stop_room = 0;

	num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);

	stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
	stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS);
	stop_room += num_dumps * mlx5e_stop_room_for_wqe(MLX5E_KTLS_DUMP_WQEBBS);

	return stop_room;
}

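/* Each kTLS TX connection gets a dedicated TIS with tls_en set, under
 * which its packets are sent to the device for encryption.
 */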
static int mlx5e_ktls_create_tis(struct mlx5_core_dev *mdev, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
	void *tisc;

	tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, tls_en, 1);

	return mlx5e_create_tis(mdev, in, tisn);
}

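/* Per-connection TX offload state, stored in the driver context area
 * of the tls_context (see mlx5e_set_ktls_tx_priv_ctx() below).
 */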
struct mlx5e_ktls_offload_context_tx {
	struct tls_offload_context_tx *tx_ctx;
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	u32 expected_seq;
	u32 tisn;
	u32 key_id;
	bool ctx_post_pending;
};

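/* The TLS stack reserves TLS_OFFLOAD_CONTEXT_SIZE_TX bytes of driver
 * context per offloaded connection; that is enough to hold a pointer
 * (enforced by the BUILD_BUG_ON below), so simply store the private
 * context pointer there.
 */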
static void
mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) >
		     TLS_OFFLOAD_CONTEXT_SIZE_TX);

	*ctx = priv_tx;
}

static struct mlx5e_ktls_offload_context_tx *
mlx5e_get_ktls_tx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);

	return *ctx;
}

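/* Install TX offload state on a socket: allocate the private context,
 * import the key into the device, create the dedicated TIS, and mark
 * the context-programming WQEs as pending so they are posted ahead of
 * the first data packet of this connection.
 */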
int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info, u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_tx = kzalloc(sizeof(*priv_tx), GFP_KERNEL);
	if (!priv_tx)
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_tx->key_id);
	if (err)
		goto err_create_key;

	priv_tx->expected_seq = start_offload_tcp_sn;
	priv_tx->crypto_info  =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
	priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx);

	mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx);

	err = mlx5e_ktls_create_tis(mdev, &priv_tx->tisn);
	if (err)
		goto err_create_tis;

	priv_tx->ctx_post_pending = true;

	return 0;

err_create_tis:
	mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
err_create_key:
	kfree(priv_tx);
	return err;
}

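/* Teardown counterpart of mlx5e_ktls_add_tx(). */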
void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	mlx5e_destroy_tis(mdev, priv_tx->tisn);
	mlx5_ktls_destroy_key(mdev, priv_tx->key_id);
	kfree(priv_tx);
}

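/* Fill the wqe_info slot of a posted WQE so the completion path knows
 * how many WQEBBs to consume and, for DUMP WQEs, which page reference
 * to release.
 */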
static void tx_fill_wi(struct mlx5e_txqsq *sq,
		       u16 pi, u8 num_wqebbs, u32 num_bytes,
		       struct page *page)
{
	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];

	*wi = (struct mlx5e_tx_wqe_info) {
		.num_wqebbs = num_wqebbs,
		.num_bytes  = num_bytes,
		.resync_dump_frag_page = page,
	};
}

static bool
mlx5e_ktls_tx_offload_test_and_clear_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
{
	bool ret = priv_tx->ctx_post_pending;

	priv_tx->ctx_post_pending = false;

	return ret;
}

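/* The two context-programming WQEs: static params carry the key id and
 * the initial record state, progress params carry the tracking state
 * for the TCP stream. Both consume SQ space accounted for in
 * mlx5e_ktls_get_stop_room().
 */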
static void
post_static_params(struct mlx5e_txqsq *sq,
		   struct mlx5e_ktls_offload_context_tx *priv_tx,
		   bool fence)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
				       priv_tx->tisn, priv_tx->key_id, 0, fence,
				       TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

static void
post_progress_params(struct mlx5e_txqsq *sq,
		     struct mlx5e_ktls_offload_context_tx *priv_tx,
		     bool fence)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_tx->tisn, fence, 0,
					 TLS_OFFLOAD_CTX_DIR_TX);
	tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
	sq->pc += num_wqebbs;
}

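/* Post the param WQEs. The progress params WQE is fenced unless the
 * static params WQE was posted first and carries the fence itself.
 */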
static void
mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
			      struct mlx5e_ktls_offload_context_tx *priv_tx,
			      bool skip_static_post, bool fence_first_post)
{
	bool progress_fence = skip_static_post || !fence_first_post;

	if (!skip_static_post)
		post_static_params(sq, priv_tx, fence_first_post);

	post_progress_params(sq, priv_tx, progress_fence);
}

struct tx_sync_info {
	u64 rcd_sn;
	u32 sync_len;
	int nr_frags;
	skb_frag_t frags[MAX_SKB_FRAGS];
};

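/* Resync outcome, handled in mlx5e_ktls_handle_tx_skb(): DONE lets the
 * skb continue through the offload path, SKIP_NO_DATA sends it without
 * offload (it ends before the offload start marker), and FAIL drops it.
 */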
enum mlx5e_ktls_sync_retval {
	MLX5E_KTLS_SYNC_DONE,
	MLX5E_KTLS_SYNC_FAIL,
	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
};

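/* Find the TLS record containing tcp_seq and collect what is needed to
 * resync the device: the record number, the number of bytes between the
 * record start and tcp_seq (sync_len), and page references to the
 * fragments holding those bytes.
 */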
static enum mlx5e_ktls_sync_retval
tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
		 u32 tcp_seq, int datalen, struct tx_sync_info *info)
{
	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
	struct tls_record_info *record;
	int remaining, i = 0;
	unsigned long flags;
	bool ends_before;

	spin_lock_irqsave(&tx_ctx->lock, flags);
	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);

	if (unlikely(!record)) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	/* There are the following cases:
	 * 1. packet ends before start marker: bypass offload.
	 * 2. packet starts before start marker and ends after it: drop,
	 *    not supported, breaks contract with kernel.
	 * 3. packet ends before tls record info starts: drop,
	 *    this packet was already acknowledged and its record info
	 *    was released.
	 */
	ends_before = before(tcp_seq + datalen - 1, tls_record_start_seq(record));

	if (unlikely(tls_record_is_start_marker(record))) {
		ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
		goto out;
	} else if (ends_before) {
		ret = MLX5E_KTLS_SYNC_FAIL;
		goto out;
	}

	info->sync_len = tcp_seq - tls_record_start_seq(record);
	remaining = info->sync_len;
	while (remaining > 0) {
		skb_frag_t *frag = &record->frags[i];

		get_page(skb_frag_page(frag));
		remaining -= skb_frag_size(frag);
		info->frags[i++] = *frag;
	}
	/* reduce the part which will be sent with the original SKB */
	if (remaining < 0)
		skb_frag_size_add(&info->frags[i - 1], remaining);
	info->nr_frags = i;
out:
	spin_unlock_irqrestore(&tx_ctx->lock, flags);
	return ret;
}

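/* Repost the context-programming WQEs with the record number of the
 * record being resynced. When the cached rec_seq already matches, the
 * static params WQE is skipped and only progress params is posted.
 */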
static void
tx_post_resync_params(struct mlx5e_txqsq *sq,
		      struct mlx5e_ktls_offload_context_tx *priv_tx,
		      u64 rcd_sn)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
	__be64 rn_be = cpu_to_be64(rcd_sn);
	bool skip_static_post;
	u16 rec_seq_sz;
	char *rec_seq;

	rec_seq = info->rec_seq;
	rec_seq_sz = sizeof(info->rec_seq);

	skip_static_post = !memcmp(rec_seq, &rn_be, rec_seq_sz);
	if (!skip_static_post)
		memcpy(rec_seq, &rn_be, rec_seq_sz);

	mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
}

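/* Post a single DUMP WQE covering one fragment of record payload. The
 * fragment is DMA-mapped here and unmapped on completion in
 * mlx5e_ktls_tx_handle_resync_dump_comp().
 */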
static int
tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
	struct mlx5e_dump_wqe *wqe;
	dma_addr_t dma_addr = 0;
	u16 ds_cnt;
	int fsz;
	u16 pi;

	BUILD_BUG_ON(MLX5E_KTLS_DUMP_WQEBBS != 1);
	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
	wqe = MLX5E_TLS_FETCH_DUMP_WQE(sq, pi);

	ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;

	cseg = &wqe->ctrl;
	dseg = &wqe->data;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8)  | MLX5_OPCODE_DUMP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
	cseg->tis_tir_num      = cpu_to_be32(tisn << 8);
	cseg->fm_ce_se         = first ? MLX5_FENCE_MODE_INITIATOR_SMALL : 0;

	fsz = skb_frag_size(frag);
	dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
				    DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
		return -ENOMEM;

	dseg->addr       = cpu_to_be64(dma_addr);
	dseg->lkey       = sq->mkey_be;
	dseg->byte_count = cpu_to_be32(fsz);
	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);

	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;

	return 0;
}

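/* Completion handler for DUMP WQEs: unmap the fragment and release the
 * page reference taken when the WQE was posted.
 */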
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
					   struct mlx5e_tx_wqe_info *wi,
					   u32 *dma_fifo_cc)
{
	struct mlx5e_sq_stats *stats;
	struct mlx5e_sq_dma *dma;

	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
	stats = sq->stats;

	mlx5e_tx_dma_unmap(sq->pdev, dma);
	put_page(wi->resync_dump_frag_page);
	stats->tls_dump_packets++;
	stats->tls_dump_bytes += wi->num_bytes;
}

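/* Post a fenced NOP, used when resync posts no DUMP WQEs but the data
 * packet must still be ordered after the param WQEs.
 */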
static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
{
	struct mlx5_wq_cyc *wq = &sq->wq;
	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);

	tx_fill_wi(sq, pi, 1, 0, NULL);

	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
}

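/* Handle a packet whose TCP sequence number is not the next expected
 * one (e.g. a retransmission): reprogram the device context for the
 * containing record and replay the preceding record bytes with DUMP
 * WQEs, splitting each fragment to the HW MTU.
 */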
static enum mlx5e_ktls_sync_retval
mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
			 struct mlx5e_txqsq *sq,
			 int datalen,
			 u32 seq)
{
	struct mlx5e_sq_stats *stats = sq->stats;
	enum mlx5e_ktls_sync_retval ret;
	struct tx_sync_info info = {};
	int i = 0;

	ret = tx_sync_info_get(priv_tx, seq, datalen, &info);
	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
			stats->tls_skip_no_sync_data++;
			return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
		}
		/* We might get here if a retransmission reaches the driver
		 * after the relevant record is acked.
		 * It should be safe to drop the packet in this case.
		 */
		stats->tls_drop_no_sync_data++;
		goto err_out;
	}

	stats->tls_ooo++;

	tx_post_resync_params(sq, priv_tx, info.rcd_sn);

	/* If no dump WQE was sent, we need to have a fence NOP WQE before the
	 * actual data xmit.
	 */
	if (!info.nr_frags) {
		tx_post_fence_nop(sq);
		return MLX5E_KTLS_SYNC_DONE;
	}

	for (; i < info.nr_frags; i++) {
		unsigned int orig_fsz, frag_offset = 0, n = 0;
		skb_frag_t *f = &info.frags[i];

		orig_fsz = skb_frag_size(f);

		do {
			bool fence = !(i || frag_offset);
			unsigned int fsz;

			n++;
			fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
			skb_frag_size_set(f, fsz);
			if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
				page_ref_add(skb_frag_page(f), n - 1);
				goto err_out;
			}

			skb_frag_off_add(f, fsz);
			frag_offset += fsz;
		} while (frag_offset < orig_fsz);

		page_ref_add(skb_frag_page(f), n - 1);
	}

	return MLX5E_KTLS_SYNC_DONE;

err_out:
	for (; i < info.nr_frags; i++)
		/* The put_page() here undoes the page ref obtained in tx_sync_info_get().
		 * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
		 * released only upon their completions (or in mlx5e_free_txqsq_descs,
		 * if the channel closes).
		 */
		put_page(skb_frag_page(&info.frags[i]));

	return MLX5E_KTLS_SYNC_FAIL;
}

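/* Main TX datapath entry for kTLS skbs: posts the initial param WQEs
 * ahead of the first packet of a connection, triggers resync handling
 * on an unexpected sequence number, and hands the connection's TIS
 * number back to the generic xmit path via @state.
 */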
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
			      struct sk_buff *skb, int datalen,
			      struct mlx5e_accel_tx_tls_state *state)
{
	struct mlx5e_ktls_offload_context_tx *priv_tx;
	struct mlx5e_sq_stats *stats = sq->stats;
	u32 seq;

	priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);

	if (unlikely(mlx5e_ktls_tx_offload_test_and_clear_pending(priv_tx))) {
		mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
		stats->tls_ctx++;
	}

	seq = ntohl(tcp_hdr(skb)->seq);
	if (unlikely(priv_tx->expected_seq != seq)) {
		enum mlx5e_ktls_sync_retval ret =
			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);

		switch (ret) {
		case MLX5E_KTLS_SYNC_DONE:
			break;
		case MLX5E_KTLS_SYNC_SKIP_NO_DATA:
			if (likely(!skb->decrypted))
				goto out;
			WARN_ON_ONCE(1);
			fallthrough;
		case MLX5E_KTLS_SYNC_FAIL:
			goto err_out;
		}
	}

	priv_tx->expected_seq = seq + datalen;

	state->tls_tisn = priv_tx->tisn;

	stats->tls_encrypted_packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
	stats->tls_encrypted_bytes   += datalen;

out:
	return true;

err_out:
	dev_kfree_skb_any(skb);
	return false;
}