// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

struct accel_rule {
	struct work_struct work;
	struct mlx5e_priv *priv;
	struct mlx5_flow_handle *rule;
};

#define PROGRESS_PARAMS_WRITE_UNIT	64
#define PROGRESS_PARAMS_PADDED_SIZE \
		(ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \
		       PROGRESS_PARAMS_WRITE_UNIT))

struct mlx5e_ktls_rx_resync_buf {
	union {
		struct mlx5_wqe_tls_progress_params_seg progress;
		u8 pad[PROGRESS_PARAMS_PADDED_SIZE];
	} ____cacheline_aligned_in_smp;
	dma_addr_t dma_addr;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
};

enum {
	MLX5E_PRIV_RX_FLAG_DELETING,
	MLX5E_NUM_PRIV_RX_FLAGS,
};

struct mlx5e_ktls_rx_resync_ctx {
	struct tls_offload_resync_async core;
	struct work_struct work;
	struct mlx5e_priv *priv;
	refcount_t refcnt;
	__be64 sw_rcd_sn_be;
	u32 seq;
};

struct mlx5e_ktls_offload_context_rx {
	struct tls12_crypto_info_aes_gcm_128 crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct completion add_ctx;
	struct mlx5e_tir tir;
	u32 key_id;
	u32 rxq;
	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);

	/* resync */
	spinlock_t lock; /* protects resync fields */
	struct mlx5e_ktls_rx_resync_ctx resync;
	struct list_head list;
};

static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
		return false;

	kfree(priv_rx);
	return true;
}

static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	refcount_inc(&priv_rx->resync.refcnt);
}

struct mlx5e_ktls_resync_resp {
	/* protects list changes */
	spinlock_t lock;
	struct list_head list;
};

void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list)
{
	kvfree(resp_list);
}

struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void)
{
	struct mlx5e_ktls_resync_resp *resp_list;

	resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL);
	if (!resp_list)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&resp_list->list);
	spin_lock_init(&resp_list->lock);

	return resp_list;
}

static void accel_rule_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct accel_rule *accel_rule;
	struct mlx5_flow_handle *rule;

	accel_rule = container_of(work, struct accel_rule, work);
	priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk,
				     mlx5e_tir_get_tirn(&priv_rx->tir),
				     MLX5_FS_DEFAULT_FLOW_TAG);
	if (!IS_ERR_OR_NULL(rule))
		accel_rule->rule = rule;
out:
	complete(&priv_rx->add_ctx);
}

static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv)
{
	INIT_WORK(&rule->work, accel_rule_handle_work);
	rule->priv = priv;
}

static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
			  struct mlx5e_icosq_wqe_info *wi)
{
	sq->db.wqe_info[pi] = *wi;
}
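
/* The two WQEs below initialize the TLS offload state of the RX TIR on the
 * channel's async ICOSQ: the static params WQE programs the crypto info,
 * key id and starting record sequence number, while the progress params WQE
 * sets the TCP sequence number from which the device tracks records. Both
 * store priv_rx in the wqe_info so that the completion handler
 * (mlx5e_ktls_handle_ctx_completion) can continue the add flow.
 */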

static struct mlx5_wqe_ctrl_seg *
post_static_params(struct mlx5e_icosq *sq,
		   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
				       mlx5e_tir_get_tirn(&priv_rx->tir),
				       priv_rx->key_id, priv_rx->resync.seq, false,
				       TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static struct mlx5_wqe_ctrl_seg *
post_progress_params(struct mlx5e_icosq *sq,
		     struct mlx5e_ktls_offload_context_rx *priv_rx,
		     u32 next_record_tcp_sn)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,
					 mlx5e_tir_get_tirn(&priv_rx->tir),
					 false, next_record_tcp_sn,
					 TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};

	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static int post_rx_param_wqes(struct mlx5e_channel *c,
			      struct mlx5e_ktls_offload_context_rx *priv_rx,
			      u32 next_record_tcp_sn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	err = 0;
	sq = &c->async_icosq;
	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg))
		goto err_out;
	cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
	if (IS_ERR(cseg))
		goto err_out;

	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
	spin_unlock_bh(&c->async_icosq_lock);

	return err;

err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	err = PTR_ERR(cseg);
	complete(&priv_rx->add_ctx);
	goto unlock;
}

static void
mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);

	*ctx = priv_rx;
}

static struct mlx5e_ktls_offload_context_rx *
mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	return *ctx;
}

/* Re-sync */
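
/* Device-requested (async) resync flow, at a high level:
 * 1. The device marks a CQE with CQE_TLS_OFFLOAD_RESYNC when it has lost
 *    sync; resync_update_sn() finds the socket and starts an async request.
 * 2. resync_handle_work() posts a GET_PSV WQE to read the HW progress
 *    params (including the resync TCP sequence number) into a DMA buffer.
 * 3. mlx5e_ktls_handle_get_psv_completion() hands that sequence number to
 *    the TLS stack; once the stack reaches the matching record it calls
 *    mlx5e_ktls_rx_resync(), and resync_handle_seq_match() queues the
 *    context so NAPI re-posts the static params WQE to resume the offload.
 */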
/* Runs in work context */
static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
				struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	struct mlx5e_ktls_rx_resync_buf *buf;
	struct mlx5e_icosq_wqe_info wi;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_seg_get_psv *psv;
	struct device *pdev;
	int err;
	u16 pi;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(!buf)) {
		err = -ENOMEM;
		goto err_out;
	}

	pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
	buf->dma_addr = dma_map_single(pdev, &buf->progress,
				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
		err = -ENOMEM;
		goto err_free;
	}

	buf->priv_rx = priv_rx;

	spin_lock_bh(&sq->channel->async_icosq_lock);

	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
		spin_unlock_bh(&sq->channel->async_icosq_lock);
		err = -ENOSPC;
		goto err_dma_unmap;
	}

	pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);

#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))

	cseg = &wqe->ctrl;
	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV |
			    (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24));
	cseg->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);

	psv = &wqe->psv;
	psv->num_psv = 1 << 4;
	psv->l_key = sq->channel->mkey_be;
	psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir));
	psv->va = cpu_to_be64(buf->dma_addr);

	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
		.num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
		.tls_get_params.buf = buf,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc++;
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	spin_unlock_bh(&sq->channel->async_icosq_lock);

	return 0;

err_dma_unmap:
	dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
err_free:
	kfree(buf);
err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	return err;
}

/* Function is called with elevated refcount.
 * It decreases it only if no WQE is posted.
 */
static void resync_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_channel *c;
	struct mlx5e_icosq *sq;

	resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		mlx5e_ktls_priv_rx_put(priv_rx);
		return;
	}

	c = resync->priv->channels.c[priv_rx->rxq];
	sq = &c->async_icosq;

	if (resync_post_get_progress_params(sq, priv_rx))
		mlx5e_ktls_priv_rx_put(priv_rx);
}

static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
			struct mlx5e_priv *priv)
{
	INIT_WORK(&resync->work, resync_handle_work);
	resync->priv = priv;
	refcount_set(&resync->refcnt, 1);
}

/* Function can be called with the refcount being either elevated or not.
 * It does not affect the refcount.
 */
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
				    struct mlx5e_channel *c)
{
	struct tls12_crypto_info_aes_gcm_128 *info = &priv_rx->crypto_info;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5e_icosq *sq;
	bool trigger_poll;

	sq = &c->async_icosq;
	ktls_resync = sq->ktls_resync;
	trigger_poll = false;

	spin_lock_bh(&ktls_resync->lock);
	spin_lock_bh(&priv_rx->lock);
	memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be, sizeof(info->rec_seq));
	if (list_empty(&priv_rx->list)) {
		list_add_tail(&priv_rx->list, &ktls_resync->list);
		trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	}
	spin_unlock_bh(&priv_rx->lock);
	spin_unlock_bh(&ktls_resync->lock);

	if (!trigger_poll)
		return;

	if (!napi_if_scheduled_mark_missed(&c->napi)) {
		spin_lock_bh(&c->async_icosq_lock);
		mlx5e_trigger_irq(sq);
		spin_unlock_bh(&c->async_icosq_lock);
	}
}

/* Function can be called with the refcount being either elevated or not.
 * It decreases the refcount and may free the kTLS priv context.
 * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was
 * already in flight.
 */
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
					  struct mlx5e_icosq *sq)
{
	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	u8 tracker_state, auth_state, *ctx;
	struct device *dev;
	u32 hw_seq;

	priv_rx = buf->priv_rx;
	resync = &priv_rx->resync;
	dev = mlx5_core_dma_dev(resync->priv->mdev);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
				DMA_FROM_DEVICE);

	ctx = buf->progress.ctx;
	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
		priv_rx->rq_stats->tls_resync_req_skip++;
		goto out;
	}

	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
	priv_rx->rq_stats->tls_resync_req_end++;
out:
	mlx5e_ktls_priv_rx_put(priv_rx);
	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
}

/* Runs in NAPI.
 * Function elevates the refcount, unless no work is queued.
 */
static bool resync_queue_get_psv(struct sock *sk)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return false;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		return false;

	resync = &priv_rx->resync;
	mlx5e_ktls_priv_rx_get(priv_rx);
	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
		mlx5e_ktls_priv_rx_put(priv_rx);

	return true;
}

/* Runs in NAPI */
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct net_device *netdev = rq->netdev;
	struct sock *sk = NULL;
	unsigned int datalen;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;
	int depth = 0;

	__vlan_get_protocol(skb, eth->h_proto, &depth);
	iph = (struct iphdr *)(skb->data + depth);

	if (iph->version == 4) {
		depth += sizeof(struct iphdr);
		th = (void *)iph + sizeof(struct iphdr);

		sk = inet_lookup_established(dev_net(netdev), &tcp_hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		depth += sizeof(struct ipv6hdr);
		th = (void *)ipv6h + sizeof(struct ipv6hdr);

		sk = __inet6_lookup_established(dev_net(netdev), &tcp_hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}

	depth += sizeof(struct tcphdr);

	if (unlikely(!sk))
		return;

	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		goto unref;

	if (unlikely(!resync_queue_get_psv(sk)))
		goto unref;

	seq = th->seq;
	datalen = skb->len - depth;
	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
	rq->stats->tls_resync_req_start++;

unref:
	sock_gen_put(sk);
}

void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
			  u32 seq, u8 *rcd_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;
	struct mlx5e_channel *c;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return;

	resync = &priv_rx->resync;
	resync->sw_rcd_sn_be = *(__be64 *)rcd_sn;
	resync->seq = seq;

	priv = netdev_priv(netdev);
	c = priv->channels.c[priv_rx->rxq];

	resync_handle_seq_match(priv_rx, c);
}

/* End of resync section */

void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		skb->decrypted = 1;
		stats->tls_decrypted_packets++;
		stats->tls_decrypted_bytes += *cqe_bcnt;
		break;
	case CQE_TLS_OFFLOAD_RESYNC:
		stats->tls_resync_req_pkt++;
		resync_update_sn(rq, skb);
		break;
	default: /* CQE_TLS_OFFLOAD_ERROR: */
		stats->tls_err++;
		break;
	}
}
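
/* Completion of the static/progress params WQEs posted by the add flow:
 * unless the context is being deleted, queue the work that installs the
 * flow steering rule directing the socket's traffic to the TLS TIR.
 */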

void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;
	struct accel_rule *rule = &priv_rx->rule;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		complete(&priv_rx->add_ctx);
		return;
	}
	queue_work(rule->priv->tls->rx_wq, &rule->work);
}

static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
	int rxq = sk_rx_queue_get(sk);

	if (unlikely(rxq == -1))
		rxq = 0;

	return rxq;
}

int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int rxq, err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
	if (unlikely(!priv_rx))
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
	if (err)
		goto err_create_key;

	INIT_LIST_HEAD(&priv_rx->list);
	spin_lock_init(&priv_rx->lock);
	priv_rx->crypto_info =
		*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;

	rxq = mlx5e_ktls_sk_get_rxq(sk);
	priv_rx->rxq = rxq;
	priv_rx->sk = sk;

	priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
	priv_rx->sw_stats = &priv->tls->sw_stats;
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

	err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
	if (err)
		goto err_create_tir;

	init_completion(&priv_rx->add_ctx);

	accel_rule_init(&priv_rx->rule, priv);
	resync = &priv_rx->resync;
	resync_init(resync, priv);
	tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC);

	err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
	if (err)
		goto err_post_wqes;

	atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);

	return 0;

err_post_wqes:
	mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
err_create_key:
	kfree(priv_rx);
	return err;
}

void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
	synchronize_net(); /* Sync with NAPI */
	if (!cancel_work_sync(&priv_rx->rule.work))
		/* completion is needed, as the priv_rx in the add flow
		 * is maintained on the wqe info (wi), not on the socket.
		 */
		wait_for_completion(&priv_rx->add_ctx);
	resync = &priv_rx->resync;
	if (cancel_work_sync(&resync->work))
		mlx5e_ktls_priv_rx_put(priv_rx);

	atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
	if (priv_rx->rule.rule)
		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

	mlx5e_tir_destroy(&priv_rx->tir);
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
	/* priv_rx should normally be freed here, but if there is an outstanding
	 * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
	 * processed.
	 */
	mlx5e_ktls_priv_rx_put(priv_rx);
}

bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5_wqe_ctrl_seg *db_cseg;
	struct mlx5e_icosq *sq;
	LIST_HEAD(local_list);
	int i, j;

	sq = &c->async_icosq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	ktls_resync = sq->ktls_resync;
	db_cseg = NULL;
	i = 0;

	spin_lock(&ktls_resync->lock);
	list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) {
		list_move(&priv_rx->list, &local_list);
		if (++i == budget)
			break;
	}
	if (list_empty(&ktls_resync->list))
		clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	spin_unlock(&ktls_resync->lock);

	spin_lock(&c->async_icosq_lock);
	for (j = 0; j < i; j++) {
		struct mlx5_wqe_ctrl_seg *cseg;

		priv_rx = list_first_entry(&local_list,
					   struct mlx5e_ktls_offload_context_rx,
					   list);
		spin_lock(&priv_rx->lock);
		cseg = post_static_params(sq, priv_rx);
		if (IS_ERR(cseg)) {
			spin_unlock(&priv_rx->lock);
			break;
		}
		list_del_init(&priv_rx->list);
		spin_unlock(&priv_rx->lock);
		db_cseg = cseg;
	}
	if (db_cseg)
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
	spin_unlock(&c->async_icosq_lock);

	priv_rx->rq_stats->tls_resync_res_ok += j;

	if (!list_empty(&local_list)) {
		/* This happens only if ICOSQ is full.
		 * There is no need to mark busy or explicitly ask for a NAPI cycle,
		 * it will be triggered by the outstanding ICOSQ completions.
		 */
		spin_lock(&ktls_resync->lock);
		list_splice(&local_list, &ktls_resync->list);
		set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
		spin_unlock(&ktls_resync->lock);
		priv_rx->rq_stats->tls_resync_res_retry++;
	}

	return i == budget;
}