// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

struct accel_rule {
	struct work_struct work;
	struct mlx5e_priv *priv;
	struct mlx5_flow_handle *rule;
};

#define PROGRESS_PARAMS_WRITE_UNIT	64
#define PROGRESS_PARAMS_PADDED_SIZE	\
		(ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \
		       PROGRESS_PARAMS_WRITE_UNIT))

struct mlx5e_ktls_rx_resync_buf {
	union {
		struct mlx5_wqe_tls_progress_params_seg progress;
		u8 pad[PROGRESS_PARAMS_PADDED_SIZE];
	} ____cacheline_aligned_in_smp;
	dma_addr_t dma_addr;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
};

enum {
	MLX5E_PRIV_RX_FLAG_DELETING,
	MLX5E_NUM_PRIV_RX_FLAGS,
};

struct mlx5e_ktls_rx_resync_ctx {
	struct tls_offload_resync_async core;
	struct work_struct work;
	struct mlx5e_priv *priv;
	refcount_t refcnt;
	__be64 sw_rcd_sn_be;
	u32 seq;
};

struct mlx5e_ktls_offload_context_rx {
	union mlx5e_crypto_info crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct completion add_ctx;
	struct mlx5e_tir tir;
	u32 key_id;
	u32 rxq;
	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);

	/* resync */
	spinlock_t lock; /* protects resync fields */
	struct mlx5e_ktls_rx_resync_ctx resync;
	struct list_head list;
};

static void mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
		return;

	kfree(priv_rx);
}

static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	refcount_inc(&priv_rx->resync.refcnt);
}

struct mlx5e_ktls_resync_resp {
	/* protects list changes */
	spinlock_t lock;
	struct list_head list;
};

void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list)
{
	kvfree(resp_list);
}

struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void)
{
	struct mlx5e_ktls_resync_resp *resp_list;

	resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL);
	if (!resp_list)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&resp_list->list);
	spin_lock_init(&resp_list->lock);

	return resp_list;
}

static void accel_rule_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct accel_rule *accel_rule;
	struct mlx5_flow_handle *rule;

	accel_rule = container_of(work, struct accel_rule, work);
	priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk,
				     mlx5e_tir_get_tirn(&priv_rx->tir),
				     MLX5_FS_DEFAULT_FLOW_TAG);
	if (!IS_ERR_OR_NULL(rule))
		accel_rule->rule = rule;
out:
	complete(&priv_rx->add_ctx);
}

static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv)
{
	INIT_WORK(&rule->work, accel_rule_handle_work);
	rule->priv = priv;
}
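/* The helpers below post the TLS static and progress params WQEs on the
 * channel's async ICOSQ. A rough sketch of the intended usage, mirroring
 * post_rx_param_wqes() further down (error handling omitted):
 *
 *	spin_lock_bh(&c->async_icosq_lock);
 *	cseg = post_static_params(sq, priv_rx);
 *	cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
 *	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
 *	spin_unlock_bh(&c->async_icosq_lock);
 *
 * Each helper checks for room with mlx5e_icosq_can_post_wqe(), builds the
 * WQE at the next producer index, records a wqe_info entry for the
 * completion handler, and advances sq->pc by the WQE size in WQEBBs; the
 * doorbell is rung once for the whole batch.
 */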
static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
			  struct mlx5e_icosq_wqe_info *wi)
{
	sq->db.wqe_info[pi] = *wi;
}

static struct mlx5_wqe_ctrl_seg *
post_static_params(struct mlx5e_icosq *sq,
		   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
				       mlx5e_tir_get_tirn(&priv_rx->tir),
				       priv_rx->key_id, priv_rx->resync.seq, false,
				       TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static struct mlx5_wqe_ctrl_seg *
post_progress_params(struct mlx5e_icosq *sq,
		     struct mlx5e_ktls_offload_context_rx *priv_rx,
		     u32 next_record_tcp_sn)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,
					 mlx5e_tir_get_tirn(&priv_rx->tir),
					 false, next_record_tcp_sn,
					 TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};

	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static int post_rx_param_wqes(struct mlx5e_channel *c,
			      struct mlx5e_ktls_offload_context_rx *priv_rx,
			      u32 next_record_tcp_sn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	err = 0;
	sq = &c->async_icosq;
	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg))
		goto err_out;
	cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
	if (IS_ERR(cseg))
		goto err_out;

	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
	spin_unlock_bh(&c->async_icosq_lock);

	return err;

err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	err = PTR_ERR(cseg);
	complete(&priv_rx->add_ctx);
	goto unlock;
}

static void
mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);

	*ctx = priv_rx;
}

static struct mlx5e_ktls_offload_context_rx *
mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	return *ctx;
}

/* Re-sync */
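/* Everything from here down to "End of resync section" implements
 * device-driven RX resync (TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC). The
 * flow, roughly, as implemented in this file:
 * 1. HW loses record boundaries and raises a RESYNC CQE; resync_update_sn()
 *    looks the socket up from the packet headers and calls
 *    tls_offload_rx_resync_async_request_start().
 * 2. From work context, a GET_PSV WQE is posted to DMA the HW progress
 *    params (tracker/auth state and the HW resync TCP SN) into host memory.
 * 3. On the GET_PSV completion, the HW SN is handed to the TLS core via
 *    tls_offload_rx_resync_async_request_end().
 * 4. Once the stack confirms the record sequence for that SN, it calls
 *    mlx5e_ktls_rx_resync(), which stores the SW record SN and queues the
 *    context so that NAPI re-posts the static params WQE with the updated
 *    rec_seq (see mlx5e_ktls_rx_handle_resync_list()).
 */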
/* Runs in work context */
static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
				struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	struct mlx5e_ktls_rx_resync_buf *buf;
	struct mlx5e_icosq_wqe_info wi;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_seg_get_psv *psv;
	struct device *pdev;
	int err;
	u16 pi;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(!buf)) {
		err = -ENOMEM;
		goto err_out;
	}

	pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
	buf->dma_addr = dma_map_single(pdev, &buf->progress,
				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
		err = -ENOMEM;
		goto err_free;
	}

	buf->priv_rx = priv_rx;

	spin_lock_bh(&sq->channel->async_icosq_lock);

	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
		spin_unlock_bh(&sq->channel->async_icosq_lock);
		err = -ENOSPC;
		goto err_dma_unmap;
	}

	pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);

#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))

	cseg = &wqe->ctrl;
	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV |
			    (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24));
	cseg->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);

	psv = &wqe->psv;
	psv->num_psv = 1 << 4;
	psv->l_key = sq->channel->mkey_be;
	psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir));
	psv->va = cpu_to_be64(buf->dma_addr);

	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
		.num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
		.tls_get_params.buf = buf,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc++;
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	spin_unlock_bh(&sq->channel->async_icosq_lock);

	return 0;

err_dma_unmap:
	dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
err_free:
	kfree(buf);
err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	return err;
}

/* Function is called with elevated refcount.
 * It decreases it only if no WQE is posted.
 */
static void resync_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_channel *c;
	struct mlx5e_icosq *sq;

	resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		mlx5e_ktls_priv_rx_put(priv_rx);
		return;
	}

	c = resync->priv->channels.c[priv_rx->rxq];
	sq = &c->async_icosq;

	if (resync_post_get_progress_params(sq, priv_rx))
		mlx5e_ktls_priv_rx_put(priv_rx);
}

static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
			struct mlx5e_priv *priv)
{
	INIT_WORK(&resync->work, resync_handle_work);
	resync->priv = priv;
	refcount_set(&resync->refcnt, 1);
}
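/* Lock order in resync_handle_seq_match() below is ktls_resync->lock, then
 * priv_rx->lock, matching mlx5e_ktls_rx_handle_resync_list(). rec_seq is
 * copied under priv_rx->lock so that a concurrent static-params re-post
 * from NAPI never reads a torn record sequence.
 */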
/* Function can be called with the refcount being either elevated or not.
 * It does not affect the refcount.
 */
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
				    struct mlx5e_channel *c)
{
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5e_icosq *sq;
	bool trigger_poll;

	sq = &c->async_icosq;
	ktls_resync = sq->ktls_resync;
	trigger_poll = false;

	spin_lock_bh(&ktls_resync->lock);
	spin_lock_bh(&priv_rx->lock);
	switch (priv_rx->crypto_info.crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info =
			&priv_rx->crypto_info.crypto_info_128;

		memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
		       sizeof(info->rec_seq));
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info =
			&priv_rx->crypto_info.crypto_info_256;

		memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
		       sizeof(info->rec_seq));
		break;
	}
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  priv_rx->crypto_info.crypto_info.cipher_type);
		spin_unlock_bh(&priv_rx->lock);
		spin_unlock_bh(&ktls_resync->lock);
		return;
	}

	if (list_empty(&priv_rx->list)) {
		list_add_tail(&priv_rx->list, &ktls_resync->list);
		trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	}
	spin_unlock_bh(&priv_rx->lock);
	spin_unlock_bh(&ktls_resync->lock);

	if (!trigger_poll)
		return;

	if (!napi_if_scheduled_mark_missed(&c->napi)) {
		spin_lock_bh(&c->async_icosq_lock);
		mlx5e_trigger_irq(sq);
		spin_unlock_bh(&c->async_icosq_lock);
	}
}

/* Function can be called with the refcount being either elevated or not.
 * It decreases the refcount and may free the kTLS priv context.
 * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was
 * already in flight.
 */
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
					  struct mlx5e_icosq *sq)
{
	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	u8 tracker_state, auth_state, *ctx;
	struct device *dev;
	u32 hw_seq;

	priv_rx = buf->priv_rx;
	resync = &priv_rx->resync;
	dev = mlx5_core_dma_dev(resync->priv->mdev);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
				DMA_FROM_DEVICE);

	ctx = buf->progress.ctx;
	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
		priv_rx->rq_stats->tls_resync_req_skip++;
		goto out;
	}

	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
	priv_rx->rq_stats->tls_resync_req_end++;
out:
	mlx5e_ktls_priv_rx_put(priv_rx);
	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
}
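/* Note on byte order above: MLX5_GET() returns hw_resync_tcp_sn in CPU
 * order, while tls_offload_rx_resync_async_request_end() takes a __be32,
 * hence the cpu_to_be32() conversion.
 */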
/* Runs in NAPI.
 * Function elevates the refcount, unless no work is queued.
 */
static bool resync_queue_get_psv(struct sock *sk)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return false;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		return false;

	resync = &priv_rx->resync;
	mlx5e_ktls_priv_rx_get(priv_rx);
	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
		mlx5e_ktls_priv_rx_put(priv_rx);

	return true;
}

/* Runs in NAPI */
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct net_device *netdev = rq->netdev;
	struct net *net = dev_net(netdev);
	struct sock *sk = NULL;
	unsigned int datalen;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;
	int depth = 0;

	__vlan_get_protocol(skb, eth->h_proto, &depth);
	iph = (struct iphdr *)(skb->data + depth);

	if (iph->version == 4) {
		depth += sizeof(struct iphdr);
		th = (void *)iph + sizeof(struct iphdr);

		sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		depth += sizeof(struct ipv6hdr);
		th = (void *)ipv6h + sizeof(struct ipv6hdr);

		sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}

	depth += sizeof(struct tcphdr);

	if (unlikely(!sk))
		return;

	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		goto unref;

	if (unlikely(!resync_queue_get_psv(sk)))
		goto unref;

	seq = th->seq;
	datalen = skb->len - depth;
	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
	rq->stats->tls_resync_req_start++;

unref:
	sock_gen_put(sk);
}

void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
			  u32 seq, u8 *rcd_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;
	struct mlx5e_channel *c;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return;

	resync = &priv_rx->resync;
	resync->sw_rcd_sn_be = *(__be64 *)rcd_sn;
	resync->seq = seq;

	priv = netdev_priv(netdev);
	c = priv->channels.c[priv_rx->rxq];

	resync_handle_seq_match(priv_rx, c);
}

/* End of resync section */

void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		skb->decrypted = 1;
		stats->tls_decrypted_packets++;
		stats->tls_decrypted_bytes += *cqe_bcnt;
		break;
	case CQE_TLS_OFFLOAD_RESYNC:
		stats->tls_resync_req_pkt++;
		resync_update_sn(rq, skb);
		break;
	default: /* CQE_TLS_OFFLOAD_ERROR: */
		stats->tls_err++;
		break;
	}
}
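/* The ctx completion below fires from the ICOSQ CQ poll loop (NAPI) once
 * the static/progress params WQEs posted by mlx5e_ktls_add_rx() complete.
 * Installing the flow-steering rule issues firmware commands and may sleep,
 * so it is deferred to rx_wq via the accel_rule work rather than done here.
 */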
void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;
	struct accel_rule *rule = &priv_rx->rule;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		complete(&priv_rx->add_ctx);
		return;
	}
	queue_work(rule->priv->tls->rx_wq, &rule->work);
}

static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
	int rxq = sk_rx_queue_get(sk);

	if (unlikely(rxq == -1))
		rxq = 0;

	return rxq;
}

int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct tls_context *tls_ctx;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;
	int rxq, err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
	if (unlikely(!priv_rx))
		return -ENOMEM;

	err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
	if (err)
		goto err_create_key;

	INIT_LIST_HEAD(&priv_rx->list);
	spin_lock_init(&priv_rx->lock);
	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		priv_rx->crypto_info.crypto_info_128 =
			*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	case TLS_CIPHER_AES_GCM_256:
		priv_rx->crypto_info.crypto_info_256 =
			*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		break;
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  crypto_info->cipher_type);
		/* Returning directly here would leak priv_rx and the HW key;
		 * unwind through the error path instead.
		 */
		err = -EOPNOTSUPP;
		goto err_cipher_type;
	}

	rxq = mlx5e_ktls_sk_get_rxq(sk);
	priv_rx->rxq = rxq;
	priv_rx->sk = sk;

	priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
	priv_rx->sw_stats = &priv->tls->sw_stats;
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

	err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
	if (err)
		goto err_create_tir;

	init_completion(&priv_rx->add_ctx);

	accel_rule_init(&priv_rx->rule, priv);
	resync = &priv_rx->resync;
	resync_init(resync, priv);
	tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC);

	err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
	if (err)
		goto err_post_wqes;

	atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);

	return 0;

err_post_wqes:
	mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
err_cipher_type:
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
err_create_key:
	kfree(priv_rx);
	return err;
}
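/* Teardown order in mlx5e_ktls_del_rx() below matters: the DELETING bit
 * stops new resync work, synchronize_net() flushes NAPI users of the driver
 * context, and only then are the pending works cancelled and references
 * dropped. The context is freed by whoever drops the last reference,
 * possibly a late GET_PSV completion.
 */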
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5_core_dev *mdev;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);
	mdev = priv->mdev;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
	synchronize_net(); /* Sync with NAPI */
	if (!cancel_work_sync(&priv_rx->rule.work))
		/* completion is needed, as the priv_rx in the add flow
		 * is maintained on the wqe info (wi), not on the socket.
		 */
		wait_for_completion(&priv_rx->add_ctx);
	resync = &priv_rx->resync;
	if (cancel_work_sync(&resync->work))
		mlx5e_ktls_priv_rx_put(priv_rx);

	atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
	if (priv_rx->rule.rule)
		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

	mlx5e_tir_destroy(&priv_rx->tir);
	mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
	/* priv_rx should normally be freed here, but if there is an outstanding
	 * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
	 * processed.
	 */
	mlx5e_ktls_priv_rx_put(priv_rx);
}

bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5_wqe_ctrl_seg *db_cseg;
	struct mlx5e_icosq *sq;
	LIST_HEAD(local_list);
	int i, j;

	sq = &c->async_icosq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	ktls_resync = sq->ktls_resync;
	db_cseg = NULL;
	i = 0;

	spin_lock(&ktls_resync->lock);
	list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) {
		list_move(&priv_rx->list, &local_list);
		if (++i == budget)
			break;
	}
	if (list_empty(&ktls_resync->list))
		clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	spin_unlock(&ktls_resync->lock);

	spin_lock(&c->async_icosq_lock);
	for (j = 0; j < i; j++) {
		struct mlx5_wqe_ctrl_seg *cseg;

		priv_rx = list_first_entry(&local_list,
					   struct mlx5e_ktls_offload_context_rx,
					   list);
		spin_lock(&priv_rx->lock);
		cseg = post_static_params(sq, priv_rx);
		if (IS_ERR(cseg)) {
			spin_unlock(&priv_rx->lock);
			break;
		}
		list_del_init(&priv_rx->list);
		spin_unlock(&priv_rx->lock);
		db_cseg = cseg;
	}
	if (db_cseg)
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
	spin_unlock(&c->async_icosq_lock);

	priv_rx->rq_stats->tls_resync_res_ok += j;

	if (!list_empty(&local_list)) {
		/* This happens only if ICOSQ is full.
		 * There is no need to mark busy or explicitly ask for a NAPI cycle,
		 * it will be triggered by the outstanding ICOSQ completions.
		 */
		spin_lock(&ktls_resync->lock);
		list_splice(&local_list, &ktls_resync->list);
		set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
		spin_unlock(&ktls_resync->lock);
		priv_rx->rq_stats->tls_resync_res_retry++;
	}

	return i == budget;
}