// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

struct accel_rule {
	struct work_struct work;
	struct mlx5e_priv *priv;
	struct mlx5_flow_handle *rule;
};

#define PROGRESS_PARAMS_WRITE_UNIT 64
#define PROGRESS_PARAMS_PADDED_SIZE					\
	(ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg),	\
	       PROGRESS_PARAMS_WRITE_UNIT))

struct mlx5e_ktls_rx_resync_buf {
	union {
		struct mlx5_wqe_tls_progress_params_seg progress;
		u8 pad[PROGRESS_PARAMS_PADDED_SIZE];
	} ____cacheline_aligned_in_smp;
	dma_addr_t dma_addr;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
};

enum {
	MLX5E_PRIV_RX_FLAG_DELETING,
	MLX5E_NUM_PRIV_RX_FLAGS,
};

struct mlx5e_ktls_rx_resync_ctx {
	struct tls_offload_resync_async core;
	struct work_struct work;
	struct mlx5e_priv *priv;
	refcount_t refcnt;
	__be64 sw_rcd_sn_be;
	u32 seq;
};

struct mlx5e_ktls_offload_context_rx {
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct completion add_ctx;
	struct mlx5e_tir tir;
	u32 key_id;
	u32 rxq;
	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);

	/* resync */
	spinlock_t lock; /* protects resync fields */
	struct mlx5e_ktls_rx_resync_ctx resync;
	struct list_head list;
};

static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
		return false;

	kfree(priv_rx);
	return true;
}

static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	refcount_inc(&priv_rx->resync.refcnt);
}

struct mlx5e_ktls_resync_resp {
	/* protects list changes */
	spinlock_t lock;
	struct list_head list;
};

void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list)
{
	kvfree(resp_list);
}

struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void)
{
	struct mlx5e_ktls_resync_resp *resp_list;

	resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL);
	if (!resp_list)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&resp_list->list);
	spin_lock_init(&resp_list->lock);

	return resp_list;
}

static void accel_rule_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct accel_rule *accel_rule;
	struct mlx5_flow_handle *rule;

	accel_rule = container_of(work, struct accel_rule, work);
	priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
				     mlx5e_tir_get_tirn(&priv_rx->tir),
				     MLX5_FS_DEFAULT_FLOW_TAG);
	if (!IS_ERR_OR_NULL(rule))
		accel_rule->rule = rule;
out:
	complete(&priv_rx->add_ctx);
}

static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv)
{
	INIT_WORK(&rule->work, accel_rule_handle_work);
	rule->priv = priv;
}

static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
			  struct mlx5e_icosq_wqe_info *wi)
{
	sq->db.wqe_info[pi] = *wi;
}
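/* Post a static params (UMR) WQE for this RX context on the async ICOSQ.
 * Returns the control segment to ring the doorbell with, or an ERR_PTR if
 * there is no room in the SQ.
 */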
static struct mlx5_wqe_ctrl_seg *
post_static_params(struct mlx5e_icosq *sq,
		   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
				       mlx5e_tir_get_tirn(&priv_rx->tir),
				       priv_rx->key_id, priv_rx->resync.seq, false,
				       TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static struct mlx5_wqe_ctrl_seg *
post_progress_params(struct mlx5e_icosq *sq,
		     struct mlx5e_ktls_offload_context_rx *priv_rx,
		     u32 next_record_tcp_sn)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,
					 mlx5e_tir_get_tirn(&priv_rx->tir),
					 false, next_record_tcp_sn,
					 TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};

	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static int post_rx_param_wqes(struct mlx5e_channel *c,
			      struct mlx5e_ktls_offload_context_rx *priv_rx,
			      u32 next_record_tcp_sn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	err = 0;
	sq = &c->async_icosq;
	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg))
		goto err_out;
	cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
	if (IS_ERR(cseg))
		goto err_out;

	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
	spin_unlock_bh(&c->async_icosq_lock);

	return err;

err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	err = PTR_ERR(cseg);
	complete(&priv_rx->add_ctx);
	goto unlock;
}

static void
mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) >
		     TLS_OFFLOAD_CONTEXT_SIZE_RX);

	*ctx = priv_rx;
}

static struct mlx5e_ktls_offload_context_rx *
mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	return *ctx;
}

/* Re-sync */
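/* RX resync flow:
 * 1. NAPI sees a CQE marked CQE_TLS_OFFLOAD_RESYNC and calls resync_update_sn(),
 *    which looks up the socket and starts an async resync request.
 * 2. resync_handle_work() posts a GET_PSV WQE on the async ICOSQ to read the
 *    HW progress params (resync_post_get_progress_params()).
 * 3. mlx5e_ktls_handle_get_psv_completion() reads hw_resync_tcp_sn and hands it
 *    to the TLS stack via tls_offload_rx_resync_async_request_end().
 * 4. When the stack confirms the record sequence it calls back into
 *    mlx5e_ktls_rx_resync(); the new static params are then posted from NAPI
 *    through the per-SQ resync list (mlx5e_ktls_rx_handle_resync_list()).
 */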
/* Runs in work context */
static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
				struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	struct mlx5e_ktls_rx_resync_buf *buf;
	struct mlx5e_icosq_wqe_info wi;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_seg_get_psv *psv;
	struct device *pdev;
	int err;
	u16 pi;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(!buf)) {
		err = -ENOMEM;
		goto err_out;
	}

	pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
	buf->dma_addr = dma_map_single(pdev, &buf->progress,
				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
		err = -ENOMEM;
		goto err_free;
	}

	buf->priv_rx = priv_rx;

	spin_lock_bh(&sq->channel->async_icosq_lock);

	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
		spin_unlock_bh(&sq->channel->async_icosq_lock);
		err = -ENOSPC;
		goto err_dma_unmap;
	}

	pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);

#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))

	cseg = &wqe->ctrl;
	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV |
			    (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24));
	cseg->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);

	psv = &wqe->psv;
	psv->num_psv = 1 << 4;
	psv->l_key = sq->channel->mkey_be;
	psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir));
	psv->va = cpu_to_be64(buf->dma_addr);

	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
		.num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
		.tls_get_params.buf = buf,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc++;
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	spin_unlock_bh(&sq->channel->async_icosq_lock);

	return 0;

err_dma_unmap:
	dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
err_free:
	kfree(buf);
err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	return err;
}

/* Function is called with elevated refcount.
 * It decreases it only if no WQE is posted.
 */
static void resync_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_channel *c;
	struct mlx5e_icosq *sq;

	resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		mlx5e_ktls_priv_rx_put(priv_rx);
		return;
	}

	c = resync->priv->channels.c[priv_rx->rxq];
	sq = &c->async_icosq;

	if (resync_post_get_progress_params(sq, priv_rx))
		mlx5e_ktls_priv_rx_put(priv_rx);
}

static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
			struct mlx5e_priv *priv)
{
	INIT_WORK(&resync->work, resync_handle_work);
	resync->priv = priv;
	refcount_set(&resync->refcnt, 1);
}

/* Function can be called with the refcount being either elevated or not.
 * It does not affect the refcount.
 */
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
				    struct mlx5e_channel *c)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5e_icosq *sq;
	bool trigger_poll;

	sq = &c->async_icosq;
	ktls_resync = sq->ktls_resync;
	trigger_poll = false;

	spin_lock_bh(&ktls_resync->lock);
	spin_lock_bh(&priv_rx->lock);
	memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
	if (list_empty(&priv_rx->list)) {
		list_add_tail(&priv_rx->list, &ktls_resync->list);
		trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	}
	spin_unlock_bh(&priv_rx->lock);
	spin_unlock_bh(&ktls_resync->lock);

	if (!trigger_poll)
		return;

	if (!napi_if_scheduled_mark_missed(&c->napi)) {
		spin_lock_bh(&c->async_icosq_lock);
		mlx5e_trigger_irq(sq);
		spin_unlock_bh(&c->async_icosq_lock);
	}
}

/* Function can be called with the refcount being either elevated or not.
 * It decreases the refcount and may free the kTLS priv context.
 * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was
 * already in flight.
 */
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
					  struct mlx5e_icosq *sq)
{
	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	u8 tracker_state, auth_state, *ctx;
	struct device *dev;
	u32 hw_seq;

	priv_rx = buf->priv_rx;
	resync = &priv_rx->resync;
	dev = mlx5_core_dma_dev(resync->priv->mdev);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
				DMA_FROM_DEVICE);

	ctx = buf->progress.ctx;
	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
		priv_rx->rq_stats->tls_resync_req_skip++;
		goto out;
	}

	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
	priv_rx->rq_stats->tls_resync_req_end++;
out:
	mlx5e_ktls_priv_rx_put(priv_rx);
	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
}

/* Runs in NAPI.
 * Function elevates the refcount, unless no work is queued.
 */
static bool resync_queue_get_psv(struct sock *sk)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return false;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		return false;

	resync = &priv_rx->resync;
	mlx5e_ktls_priv_rx_get(priv_rx);
	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
		mlx5e_ktls_priv_rx_put(priv_rx);

	return true;
}

/* Runs in NAPI */
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct net_device *netdev = rq->netdev;
	struct sock *sk = NULL;
	unsigned int datalen;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;
	int depth = 0;

	__vlan_get_protocol(skb, eth->h_proto, &depth);
	iph = (struct iphdr *)(skb->data + depth);

	if (iph->version == 4) {
		depth += sizeof(struct iphdr);
		th = (void *)iph + sizeof(struct iphdr);

		sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		depth += sizeof(struct ipv6hdr);
		th = (void *)ipv6h + sizeof(struct ipv6hdr);

		sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}

	depth += sizeof(struct tcphdr);

	if (unlikely(!sk))
		return;

	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		goto unref;

	if (unlikely(!resync_queue_get_psv(sk)))
		goto unref;

	seq = th->seq;
	datalen = skb->len - depth;
	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
	rq->stats->tls_resync_req_start++;

unref:
	sock_gen_put(sk);
}

void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
			  u32 seq, u8 *rcd_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;
	struct mlx5e_channel *c;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return;

	resync = &priv_rx->resync;
	resync->sw_rcd_sn_be = *(__be64 *)rcd_sn;
	resync->seq = seq;

	priv = netdev_priv(netdev);
	c = priv->channels.c[priv_rx->rxq];

	resync_handle_seq_match(priv_rx, c);
}

/* End of resync section */

void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		skb->decrypted = 1;
		stats->tls_decrypted_packets++;
		stats->tls_decrypted_bytes += *cqe_bcnt;
		break;
	case CQE_TLS_OFFLOAD_RESYNC:
		stats->tls_resync_req_pkt++;
		resync_update_sn(rq, skb);
		break;
	default: /* CQE_TLS_OFFLOAD_ERROR: */
		stats->tls_err++;
		break;
	}
}
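/* Completion of a TLS params WQE: hand over to the flow-steering rule work,
 * unless the context is being deleted, in which case only add_ctx is completed.
 */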
void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;
	struct accel_rule *rule = &priv_rx->rule;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		complete(&priv_rx->add_ctx);
		return;
	}
	queue_work(rule->priv->tls->rx_wq, &rule->work);
}

static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
	int rxq = sk_rx_queue_get(sk);

	if (unlikely(rxq == -1))
		rxq = 0;

	return rxq;
}

int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int rxq, err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
	if (unlikely(!priv_rx))
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
	if (err)
		goto err_create_key;

	INIT_LIST_HEAD(&priv_rx->list);
	spin_lock_init(&priv_rx->lock);
	priv_rx->crypto_info =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;

	rxq = mlx5e_ktls_sk_get_rxq(sk);
	priv_rx->rxq = rxq;
	priv_rx->sk = sk;

	priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
	priv_rx->sw_stats = &priv->tls->sw_stats;
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

	err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
	if (err)
		goto err_create_tir;

	init_completion(&priv_rx->add_ctx);

	accel_rule_init(&priv_rx->rule, priv);
	resync = &priv_rx->resync;
	resync_init(resync, priv);
	tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC);

	err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
	if (err)
		goto err_post_wqes;

	atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);

	return 0;

err_post_wqes:
	mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
err_create_key:
	kfree(priv_rx);
	return err;
}

void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
	synchronize_net(); /* Sync with NAPI */
	if (!cancel_work_sync(&priv_rx->rule.work))
		/* completion is needed, as the priv_rx in the add flow
		 * is maintained on the wqe info (wi), not on the socket.
		 */
		wait_for_completion(&priv_rx->add_ctx);
	resync = &priv_rx->resync;
	if (cancel_work_sync(&resync->work))
		mlx5e_ktls_priv_rx_put(priv_rx);

	atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
	if (priv_rx->rule.rule)
		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

	mlx5e_tir_destroy(&priv_rx->tir);
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
	/* priv_rx should normally be freed here, but if there is an outstanding
	 * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
	 * processed.
	 */
	mlx5e_ktls_priv_rx_put(priv_rx);
}

bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5_wqe_ctrl_seg *db_cseg;
	struct mlx5e_icosq *sq;
	LIST_HEAD(local_list);
	int i, j;

	sq = &c->async_icosq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	ktls_resync = sq->ktls_resync;
	db_cseg = NULL;
	i = 0;

	spin_lock(&ktls_resync->lock);
	list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) {
		list_move(&priv_rx->list, &local_list);
		if (++i == budget)
			break;
	}
	if (list_empty(&ktls_resync->list))
		clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	spin_unlock(&ktls_resync->lock);

	spin_lock(&c->async_icosq_lock);
	for (j = 0; j < i; j++) {
		struct mlx5_wqe_ctrl_seg *cseg;

		priv_rx = list_first_entry(&local_list,
					   struct mlx5e_ktls_offload_context_rx,
					   list);
		spin_lock(&priv_rx->lock);
		cseg = post_static_params(sq, priv_rx);
		if (IS_ERR(cseg)) {
			spin_unlock(&priv_rx->lock);
			break;
		}
		list_del_init(&priv_rx->list);
		spin_unlock(&priv_rx->lock);
		db_cseg = cseg;
	}
	if (db_cseg)
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
	spin_unlock(&c->async_icosq_lock);

	priv_rx->rq_stats->tls_resync_res_ok += j;

	if (!list_empty(&local_list)) {
		/* This happens only if ICOSQ is full.
		 * There is no need to mark busy or explicitly ask for a NAPI cycle,
		 * it will be triggered by the outstanding ICOSQ completions.
		 */
		spin_lock(&ktls_resync->lock);
		list_splice(&local_list, &ktls_resync->list);
		set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
		spin_unlock(&ktls_resync->lock);
		priv_rx->rq_stats->tls_resync_res_retry++;
	}

	return i == budget;
}