1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 // Copyright (c) 2019 Mellanox Technologies. 3 4 #include <net/inet6_hashtables.h> 5 #include "en_accel/en_accel.h" 6 #include "en_accel/tls.h" 7 #include "en_accel/ktls_txrx.h" 8 #include "en_accel/ktls_utils.h" 9 #include "en_accel/fs_tcp.h" 10 11 struct accel_rule { 12 struct work_struct work; 13 struct mlx5e_priv *priv; 14 struct mlx5_flow_handle *rule; 15 }; 16 17 #define PROGRESS_PARAMS_WRITE_UNIT 64 18 #define PROGRESS_PARAMS_PADDED_SIZE \ 19 (ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \ 20 PROGRESS_PARAMS_WRITE_UNIT)) 21 22 struct mlx5e_ktls_rx_resync_buf { 23 union { 24 struct mlx5_wqe_tls_progress_params_seg progress; 25 u8 pad[PROGRESS_PARAMS_PADDED_SIZE]; 26 } ____cacheline_aligned_in_smp; 27 dma_addr_t dma_addr; 28 struct mlx5e_ktls_offload_context_rx *priv_rx; 29 }; 30 31 enum { 32 MLX5E_PRIV_RX_FLAG_DELETING, 33 MLX5E_NUM_PRIV_RX_FLAGS, 34 }; 35 36 struct mlx5e_ktls_rx_resync_ctx { 37 struct tls_offload_resync_async core; 38 struct work_struct work; 39 struct mlx5e_priv *priv; 40 refcount_t refcnt; 41 __be64 sw_rcd_sn_be; 42 u32 seq; 43 }; 44 45 struct mlx5e_ktls_offload_context_rx { 46 struct tls12_crypto_info_aes_gcm_128 crypto_info; 47 struct accel_rule rule; 48 struct sock *sk; 49 struct mlx5e_rq_stats *rq_stats; 50 struct mlx5e_tls_sw_stats *sw_stats; 51 struct completion add_ctx; 52 u32 tirn; 53 u32 key_id; 54 u32 rxq; 55 DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS); 56 57 /* resync */ 58 struct mlx5e_ktls_rx_resync_ctx resync; 59 }; 60 61 static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx) 62 { 63 if (!refcount_dec_and_test(&priv_rx->resync.refcnt)) 64 return false; 65 66 kfree(priv_rx); 67 return true; 68 } 69 70 static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx) 71 { 72 refcount_inc(&priv_rx->resync.refcnt); 73 } 74 75 static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn) 76 { 77 int err, inlen; 78 void *tirc; 79 u32 *in; 80 81 inlen = MLX5_ST_SZ_BYTES(create_tir_in); 82 in = kvzalloc(inlen, GFP_KERNEL); 83 if (!in) 84 return -ENOMEM; 85 86 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); 87 88 MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn); 89 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); 90 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); 91 MLX5_SET(tirc, tirc, indirect_table, rqtn); 92 MLX5_SET(tirc, tirc, tls_en, 1); 93 MLX5_SET(tirc, tirc, self_lb_block, 94 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST | 95 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST); 96 97 err = mlx5_core_create_tir(mdev, in, tirn); 98 99 kvfree(in); 100 return err; 101 } 102 103 static void accel_rule_handle_work(struct work_struct *work) 104 { 105 struct mlx5e_ktls_offload_context_rx *priv_rx; 106 struct accel_rule *accel_rule; 107 struct mlx5_flow_handle *rule; 108 109 accel_rule = container_of(work, struct accel_rule, work); 110 priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule); 111 if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) 112 goto out; 113 114 rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk, 115 priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG); 116 if (!IS_ERR_OR_NULL(rule)) 117 accel_rule->rule = rule; 118 out: 119 complete(&priv_rx->add_ctx); 120 } 121 122 static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv, 123 struct sock *sk) 124 { 125 INIT_WORK(&rule->work, accel_rule_handle_work); 126 rule->priv = priv; 127 } 128 129 static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi, 130 struct mlx5e_icosq_wqe_info *wi) 131 { 132 sq->db.wqe_info[pi] = *wi; 133 } 134 135 static struct mlx5_wqe_ctrl_seg * 136 post_static_params(struct mlx5e_icosq *sq, 137 struct mlx5e_ktls_offload_context_rx *priv_rx) 138 { 139 struct mlx5e_set_tls_static_params_wqe *wqe; 140 struct mlx5e_icosq_wqe_info wi; 141 u16 pi, num_wqebbs; 142 143 num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS; 144 if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs))) 145 return ERR_PTR(-ENOSPC); 146 147 pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); 148 wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); 149 mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info, 150 priv_rx->tirn, priv_rx->key_id, 151 priv_rx->resync.seq, false, 152 TLS_OFFLOAD_CTX_DIR_RX); 153 wi = (struct mlx5e_icosq_wqe_info) { 154 .wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS, 155 .num_wqebbs = num_wqebbs, 156 .tls_set_params.priv_rx = priv_rx, 157 }; 158 icosq_fill_wi(sq, pi, &wi); 159 sq->pc += num_wqebbs; 160 161 return &wqe->ctrl; 162 } 163 164 static struct mlx5_wqe_ctrl_seg * 165 post_progress_params(struct mlx5e_icosq *sq, 166 struct mlx5e_ktls_offload_context_rx *priv_rx, 167 u32 next_record_tcp_sn) 168 { 169 struct mlx5e_set_tls_progress_params_wqe *wqe; 170 struct mlx5e_icosq_wqe_info wi; 171 u16 pi, num_wqebbs; 172 173 num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS; 174 if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs))) 175 return ERR_PTR(-ENOSPC); 176 177 pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); 178 wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi); 179 mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, priv_rx->tirn, false, 180 next_record_tcp_sn, 181 TLS_OFFLOAD_CTX_DIR_RX); 182 wi = (struct mlx5e_icosq_wqe_info) { 183 .wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS, 184 .num_wqebbs = num_wqebbs, 185 .tls_set_params.priv_rx = priv_rx, 186 }; 187 188 icosq_fill_wi(sq, pi, &wi); 189 sq->pc += num_wqebbs; 190 191 return &wqe->ctrl; 192 } 193 194 static int post_rx_param_wqes(struct mlx5e_channel *c, 195 struct mlx5e_ktls_offload_context_rx *priv_rx, 196 u32 next_record_tcp_sn) 197 { 198 struct mlx5_wqe_ctrl_seg *cseg; 199 struct mlx5e_icosq *sq; 200 int err; 201 202 err = 0; 203 sq = &c->async_icosq; 204 spin_lock_bh(&c->async_icosq_lock); 205 206 cseg = post_static_params(sq, priv_rx); 207 if (IS_ERR(cseg)) 208 goto err_out; 209 cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn); 210 if (IS_ERR(cseg)) 211 goto err_out; 212 213 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); 214 unlock: 215 spin_unlock_bh(&c->async_icosq_lock); 216 217 return err; 218 219 err_out: 220 priv_rx->rq_stats->tls_resync_req_skip++; 221 err = PTR_ERR(cseg); 222 complete(&priv_rx->add_ctx); 223 goto unlock; 224 } 225 226 static void 227 mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx, 228 struct mlx5e_ktls_offload_context_rx *priv_rx) 229 { 230 struct mlx5e_ktls_offload_context_rx **ctx = 231 __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); 232 233 BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) > 234 TLS_OFFLOAD_CONTEXT_SIZE_RX); 235 236 *ctx = priv_rx; 237 } 238 239 static struct mlx5e_ktls_offload_context_rx * 240 mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx) 241 { 242 struct mlx5e_ktls_offload_context_rx **ctx = 243 __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); 244 245 return *ctx; 246 } 247 248 /* Re-sync */ 249 /* Runs in work context */ 250 static int 251 resync_post_get_progress_params(struct mlx5e_icosq *sq, 252 struct mlx5e_ktls_offload_context_rx *priv_rx) 253 { 254 struct mlx5e_get_tls_progress_params_wqe *wqe; 255 struct mlx5e_ktls_rx_resync_buf *buf; 256 struct mlx5e_icosq_wqe_info wi; 257 struct mlx5_wqe_ctrl_seg *cseg; 258 struct mlx5_seg_get_psv *psv; 259 struct device *pdev; 260 int err; 261 u16 pi; 262 263 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 264 if (unlikely(!buf)) { 265 err = -ENOMEM; 266 goto err_out; 267 } 268 269 pdev = mlx5_core_dma_dev(sq->channel->priv->mdev); 270 buf->dma_addr = dma_map_single(pdev, &buf->progress, 271 PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); 272 if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) { 273 err = -ENOMEM; 274 goto err_free; 275 } 276 277 buf->priv_rx = priv_rx; 278 279 spin_lock_bh(&sq->channel->async_icosq_lock); 280 281 if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) { 282 spin_unlock_bh(&sq->channel->async_icosq_lock); 283 err = -ENOSPC; 284 goto err_dma_unmap; 285 } 286 287 pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS); 288 wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi); 289 290 #define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS)) 291 292 cseg = &wqe->ctrl; 293 cseg->opmod_idx_opcode = 294 cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV | 295 (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24)); 296 cseg->qpn_ds = 297 cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT); 298 299 psv = &wqe->psv; 300 psv->num_psv = 1 << 4; 301 psv->l_key = sq->channel->mkey_be; 302 psv->psv_index[0] = cpu_to_be32(priv_rx->tirn); 303 psv->va = cpu_to_be64(buf->dma_addr); 304 305 wi = (struct mlx5e_icosq_wqe_info) { 306 .wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS, 307 .num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS, 308 .tls_get_params.buf = buf, 309 }; 310 icosq_fill_wi(sq, pi, &wi); 311 sq->pc++; 312 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); 313 spin_unlock_bh(&sq->channel->async_icosq_lock); 314 315 return 0; 316 317 err_dma_unmap: 318 dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); 319 err_free: 320 kfree(buf); 321 err_out: 322 priv_rx->rq_stats->tls_resync_req_skip++; 323 return err; 324 } 325 326 /* Function is called with elevated refcount. 327 * It decreases it only if no WQE is posted. 328 */ 329 static void resync_handle_work(struct work_struct *work) 330 { 331 struct mlx5e_ktls_offload_context_rx *priv_rx; 332 struct mlx5e_ktls_rx_resync_ctx *resync; 333 struct mlx5e_channel *c; 334 struct mlx5e_icosq *sq; 335 336 resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work); 337 priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync); 338 339 if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { 340 mlx5e_ktls_priv_rx_put(priv_rx); 341 return; 342 } 343 344 c = resync->priv->channels.c[priv_rx->rxq]; 345 sq = &c->async_icosq; 346 347 if (resync_post_get_progress_params(sq, priv_rx)) 348 mlx5e_ktls_priv_rx_put(priv_rx); 349 } 350 351 static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, 352 struct mlx5e_priv *priv) 353 { 354 INIT_WORK(&resync->work, resync_handle_work); 355 resync->priv = priv; 356 refcount_set(&resync->refcnt, 1); 357 } 358 359 /* Function can be called with the refcount being either elevated or not. 360 * It does not affect the refcount. 361 */ 362 static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx, 363 struct mlx5e_channel *c) 364 { 365 struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info; 366 struct mlx5_wqe_ctrl_seg *cseg; 367 struct mlx5e_icosq *sq; 368 int err; 369 370 memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq)); 371 err = 0; 372 373 sq = &c->async_icosq; 374 spin_lock_bh(&c->async_icosq_lock); 375 376 cseg = post_static_params(sq, priv_rx); 377 if (IS_ERR(cseg)) { 378 priv_rx->rq_stats->tls_resync_res_skip++; 379 err = PTR_ERR(cseg); 380 goto unlock; 381 } 382 /* Do not increment priv_rx refcnt, CQE handling is empty */ 383 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); 384 priv_rx->rq_stats->tls_resync_res_ok++; 385 unlock: 386 spin_unlock_bh(&c->async_icosq_lock); 387 388 return err; 389 } 390 391 /* Function can be called with the refcount being either elevated or not. 392 * It decreases the refcount and may free the kTLS priv context. 393 * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was 394 * already in flight. 395 */ 396 void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, 397 struct mlx5e_icosq *sq) 398 { 399 struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf; 400 struct mlx5e_ktls_offload_context_rx *priv_rx; 401 struct mlx5e_ktls_rx_resync_ctx *resync; 402 u8 tracker_state, auth_state, *ctx; 403 struct device *dev; 404 u32 hw_seq; 405 406 priv_rx = buf->priv_rx; 407 resync = &priv_rx->resync; 408 dev = mlx5_core_dma_dev(resync->priv->mdev); 409 if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) 410 goto out; 411 412 dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, 413 DMA_FROM_DEVICE); 414 415 ctx = buf->progress.ctx; 416 tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state); 417 auth_state = MLX5_GET(tls_progress_params, ctx, auth_state); 418 if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING || 419 auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) { 420 priv_rx->rq_stats->tls_resync_req_skip++; 421 goto out; 422 } 423 424 hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn); 425 tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); 426 priv_rx->rq_stats->tls_resync_req_end++; 427 out: 428 mlx5e_ktls_priv_rx_put(priv_rx); 429 dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); 430 kfree(buf); 431 } 432 433 /* Runs in NAPI. 434 * Function elevates the refcount, unless no work is queued. 435 */ 436 static bool resync_queue_get_psv(struct sock *sk) 437 { 438 struct mlx5e_ktls_offload_context_rx *priv_rx; 439 struct mlx5e_ktls_rx_resync_ctx *resync; 440 441 priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk)); 442 if (unlikely(!priv_rx)) 443 return false; 444 445 if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) 446 return false; 447 448 resync = &priv_rx->resync; 449 mlx5e_ktls_priv_rx_get(priv_rx); 450 if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) 451 mlx5e_ktls_priv_rx_put(priv_rx); 452 453 return true; 454 } 455 456 /* Runs in NAPI */ 457 static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb) 458 { 459 struct ethhdr *eth = (struct ethhdr *)(skb->data); 460 struct net_device *netdev = rq->netdev; 461 struct sock *sk = NULL; 462 unsigned int datalen; 463 struct iphdr *iph; 464 struct tcphdr *th; 465 __be32 seq; 466 int depth = 0; 467 468 __vlan_get_protocol(skb, eth->h_proto, &depth); 469 iph = (struct iphdr *)(skb->data + depth); 470 471 if (iph->version == 4) { 472 depth += sizeof(struct iphdr); 473 th = (void *)iph + sizeof(struct iphdr); 474 475 sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo, 476 iph->saddr, th->source, iph->daddr, 477 th->dest, netdev->ifindex); 478 #if IS_ENABLED(CONFIG_IPV6) 479 } else { 480 struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph; 481 482 depth += sizeof(struct ipv6hdr); 483 th = (void *)ipv6h + sizeof(struct ipv6hdr); 484 485 sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo, 486 &ipv6h->saddr, th->source, 487 &ipv6h->daddr, ntohs(th->dest), 488 netdev->ifindex, 0); 489 #endif 490 } 491 492 depth += sizeof(struct tcphdr); 493 494 if (unlikely(!sk)) 495 return; 496 497 if (unlikely(sk->sk_state == TCP_TIME_WAIT)) 498 goto unref; 499 500 if (unlikely(!resync_queue_get_psv(sk))) 501 goto unref; 502 503 seq = th->seq; 504 datalen = skb->len - depth; 505 tls_offload_rx_resync_async_request_start(sk, seq, datalen); 506 rq->stats->tls_resync_req_start++; 507 508 unref: 509 sock_gen_put(sk); 510 } 511 512 void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, 513 u32 seq, u8 *rcd_sn) 514 { 515 struct mlx5e_ktls_offload_context_rx *priv_rx; 516 struct mlx5e_ktls_rx_resync_ctx *resync; 517 struct mlx5e_priv *priv; 518 struct mlx5e_channel *c; 519 520 priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk)); 521 if (unlikely(!priv_rx)) 522 return; 523 524 resync = &priv_rx->resync; 525 resync->sw_rcd_sn_be = *(__be64 *)rcd_sn; 526 resync->seq = seq; 527 528 priv = netdev_priv(netdev); 529 c = priv->channels.c[priv_rx->rxq]; 530 531 resync_handle_seq_match(priv_rx, c); 532 } 533 534 /* End of resync section */ 535 536 void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, 537 struct mlx5_cqe64 *cqe, u32 *cqe_bcnt) 538 { 539 struct mlx5e_rq_stats *stats = rq->stats; 540 541 switch (get_cqe_tls_offload(cqe)) { 542 case CQE_TLS_OFFLOAD_DECRYPTED: 543 skb->decrypted = 1; 544 stats->tls_decrypted_packets++; 545 stats->tls_decrypted_bytes += *cqe_bcnt; 546 break; 547 case CQE_TLS_OFFLOAD_RESYNC: 548 stats->tls_resync_req_pkt++; 549 resync_update_sn(rq, skb); 550 break; 551 default: /* CQE_TLS_OFFLOAD_ERROR: */ 552 stats->tls_err++; 553 break; 554 } 555 } 556 557 void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi) 558 { 559 struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx; 560 struct accel_rule *rule = &priv_rx->rule; 561 562 if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { 563 complete(&priv_rx->add_ctx); 564 return; 565 } 566 queue_work(rule->priv->tls->rx_wq, &rule->work); 567 } 568 569 static int mlx5e_ktls_sk_get_rxq(struct sock *sk) 570 { 571 int rxq = sk_rx_queue_get(sk); 572 573 if (unlikely(rxq == -1)) 574 rxq = 0; 575 576 return rxq; 577 } 578 579 int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, 580 struct tls_crypto_info *crypto_info, 581 u32 start_offload_tcp_sn) 582 { 583 struct mlx5e_ktls_offload_context_rx *priv_rx; 584 struct mlx5e_ktls_rx_resync_ctx *resync; 585 struct tls_context *tls_ctx; 586 struct mlx5_core_dev *mdev; 587 struct mlx5e_priv *priv; 588 int rxq, err; 589 u32 rqtn; 590 591 tls_ctx = tls_get_ctx(sk); 592 priv = netdev_priv(netdev); 593 mdev = priv->mdev; 594 priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL); 595 if (unlikely(!priv_rx)) 596 return -ENOMEM; 597 598 err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id); 599 if (err) 600 goto err_create_key; 601 602 priv_rx->crypto_info = 603 *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info; 604 605 rxq = mlx5e_ktls_sk_get_rxq(sk); 606 priv_rx->rxq = rxq; 607 priv_rx->sk = sk; 608 609 priv_rx->rq_stats = &priv->channel_stats[rxq].rq; 610 priv_rx->sw_stats = &priv->tls->sw_stats; 611 mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); 612 613 rqtn = priv->direct_tir[rxq].rqt.rqtn; 614 615 err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn); 616 if (err) 617 goto err_create_tir; 618 619 init_completion(&priv_rx->add_ctx); 620 621 accel_rule_init(&priv_rx->rule, priv, sk); 622 resync = &priv_rx->resync; 623 resync_init(resync, priv); 624 tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core; 625 tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC); 626 627 err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn); 628 if (err) 629 goto err_post_wqes; 630 631 atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx); 632 633 return 0; 634 635 err_post_wqes: 636 mlx5_core_destroy_tir(mdev, priv_rx->tirn); 637 err_create_tir: 638 mlx5_ktls_destroy_key(mdev, priv_rx->key_id); 639 err_create_key: 640 kfree(priv_rx); 641 return err; 642 } 643 644 void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) 645 { 646 struct mlx5e_ktls_offload_context_rx *priv_rx; 647 struct mlx5e_ktls_rx_resync_ctx *resync; 648 struct mlx5_core_dev *mdev; 649 struct mlx5e_priv *priv; 650 651 priv = netdev_priv(netdev); 652 mdev = priv->mdev; 653 654 priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx); 655 set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags); 656 mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL); 657 synchronize_net(); /* Sync with NAPI */ 658 if (!cancel_work_sync(&priv_rx->rule.work)) 659 /* completion is needed, as the priv_rx in the add flow 660 * is maintained on the wqe info (wi), not on the socket. 661 */ 662 wait_for_completion(&priv_rx->add_ctx); 663 resync = &priv_rx->resync; 664 if (cancel_work_sync(&resync->work)) 665 mlx5e_ktls_priv_rx_put(priv_rx); 666 667 atomic64_inc(&priv_rx->sw_stats->rx_tls_del); 668 if (priv_rx->rule.rule) 669 mlx5e_accel_fs_del_sk(priv_rx->rule.rule); 670 671 mlx5_core_destroy_tir(mdev, priv_rx->tirn); 672 mlx5_ktls_destroy_key(mdev, priv_rx->key_id); 673 /* priv_rx should normally be freed here, but if there is an outstanding 674 * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is 675 * processed. 676 */ 677 mlx5e_ktls_priv_rx_put(priv_rx); 678 } 679