// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include <net/inet6_hashtables.h>
#include "en_accel/en_accel.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

struct accel_rule {
	struct work_struct work;
	struct mlx5e_priv *priv;
	struct mlx5_flow_handle *rule;
};

#define PROGRESS_PARAMS_WRITE_UNIT	64
#define PROGRESS_PARAMS_PADDED_SIZE \
		(ALIGN(sizeof(struct mlx5_wqe_tls_progress_params_seg), \
		       PROGRESS_PARAMS_WRITE_UNIT))

struct mlx5e_ktls_rx_resync_buf {
	union {
		struct mlx5_wqe_tls_progress_params_seg progress;
		u8 pad[PROGRESS_PARAMS_PADDED_SIZE];
	} ____cacheline_aligned_in_smp;
	dma_addr_t dma_addr;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
};

enum {
	MLX5E_PRIV_RX_FLAG_DELETING,
	MLX5E_NUM_PRIV_RX_FLAGS,
};

struct mlx5e_ktls_rx_resync_ctx {
	struct tls_offload_resync_async core;
	struct work_struct work;
	struct mlx5e_priv *priv;
	refcount_t refcnt;
	__be64 sw_rcd_sn_be;
	u32 seq;
};

struct mlx5e_ktls_offload_context_rx {
	union mlx5e_crypto_info crypto_info;
	struct accel_rule rule;
	struct sock *sk;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_tls_sw_stats *sw_stats;
	struct completion add_ctx;
	struct mlx5e_tir tir;
	struct mlx5_crypto_dek *dek;
	u32 rxq;
	DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);

	/* resync */
	spinlock_t lock; /* protects resync fields */
	struct mlx5e_ktls_rx_resync_ctx resync;
	struct list_head list;
};

static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
		return false;

	kfree(priv_rx);
	return true;
}

static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	refcount_inc(&priv_rx->resync.refcnt);
}

struct mlx5e_ktls_resync_resp {
	/* protects list changes */
	spinlock_t lock;
	struct list_head list;
};

void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list)
{
	kvfree(resp_list);
}

struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void)
{
	struct mlx5e_ktls_resync_resp *resp_list;

	resp_list = kvzalloc(sizeof(*resp_list), GFP_KERNEL);
	if (!resp_list)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&resp_list->list);
	spin_lock_init(&resp_list->lock);

	return resp_list;
}

static void accel_rule_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct accel_rule *accel_rule;
	struct mlx5_flow_handle *rule;

	accel_rule = container_of(work, struct accel_rule, work);
	priv_rx = container_of(accel_rule, struct mlx5e_ktls_offload_context_rx, rule);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	rule = mlx5e_accel_fs_add_sk(accel_rule->priv->fs, priv_rx->sk,
				     mlx5e_tir_get_tirn(&priv_rx->tir),
				     MLX5_FS_DEFAULT_FLOW_TAG);
	if (!IS_ERR_OR_NULL(rule))
		accel_rule->rule = rule;
out:
	complete(&priv_rx->add_ctx);
}

static void accel_rule_init(struct accel_rule *rule, struct mlx5e_priv *priv)
{
	INIT_WORK(&rule->work, accel_rule_handle_work);
	rule->priv = priv;
}

static void icosq_fill_wi(struct mlx5e_icosq *sq, u16 pi,
			  struct mlx5e_icosq_wqe_info *wi)
{
	sq->db.wqe_info[pi] = *wi;
}
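
/* The helpers below post TLS static and progress params WQEs on the
 * channel's async ICOSQ. Each one reserves room in the SQ, builds the WQE
 * and records the owning priv_rx in the WQE info so the completion handler
 * can find it; ringing the doorbell is left to the caller.
 */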

static struct mlx5_wqe_ctrl_seg *
post_static_params(struct mlx5e_icosq *sq,
		   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_set_tls_static_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
				       mlx5e_tir_get_tirn(&priv_rx->tir),
				       mlx5_crypto_dek_get_id(priv_rx->dek),
				       priv_rx->resync.seq, false,
				       TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static struct mlx5_wqe_ctrl_seg *
post_progress_params(struct mlx5e_icosq *sq,
		     struct mlx5e_ktls_offload_context_rx *priv_rx,
		     u32 next_record_tcp_sn)
{
	struct mlx5e_set_tls_progress_params_wqe *wqe;
	struct mlx5e_icosq_wqe_info wi;
	u16 pi, num_wqebbs;

	num_wqebbs = MLX5E_TLS_SET_PROGRESS_PARAMS_WQEBBS;
	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, num_wqebbs)))
		return ERR_PTR(-ENOSPC);

	pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs);
	wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi);
	mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn,
					 mlx5e_tir_get_tirn(&priv_rx->tir),
					 false, next_record_tcp_sn,
					 TLS_OFFLOAD_CTX_DIR_RX);
	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS,
		.num_wqebbs = num_wqebbs,
		.tls_set_params.priv_rx = priv_rx,
	};

	icosq_fill_wi(sq, pi, &wi);
	sq->pc += num_wqebbs;

	return &wqe->ctrl;
}

static int post_rx_param_wqes(struct mlx5e_channel *c,
			      struct mlx5e_ktls_offload_context_rx *priv_rx,
			      u32 next_record_tcp_sn)
{
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5e_icosq *sq;
	int err;

	err = 0;
	sq = &c->async_icosq;
	spin_lock_bh(&c->async_icosq_lock);

	cseg = post_static_params(sq, priv_rx);
	if (IS_ERR(cseg))
		goto err_out;
	cseg = post_progress_params(sq, priv_rx, next_record_tcp_sn);
	if (IS_ERR(cseg))
		goto err_out;

	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
unlock:
	spin_unlock_bh(&c->async_icosq_lock);

	return err;

err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	err = PTR_ERR(cseg);
	complete(&priv_rx->add_ctx);
	goto unlock;
}

static void
mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,
			   struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);

	*ctx = priv_rx;
}

static struct mlx5e_ktls_offload_context_rx *
mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx **ctx =
		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX);

	return *ctx;
}

/* Re-sync */
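/* RX resync flow, at a high level:
 * 1. In NAPI, a CQE marked CQE_TLS_OFFLOAD_RESYNC makes resync_update_sn()
 *    look up the socket and start an async resync request.
 * 2. resync_handle_work() posts a GET_PSV WQE to read the HW progress
 *    params into a DMA buffer.
 * 3. mlx5e_ktls_handle_get_psv_completion() passes the HW resync TCP SN to
 *    the TLS stack via tls_offload_rx_resync_async_request_end().
 * 4. When the stack reports a record match, mlx5e_ktls_rx_resync() updates
 *    the cached rec_seq and the context is queued so NAPI re-posts its
 *    static params (see mlx5e_ktls_rx_handle_resync_list()).
 */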
/* Runs in work context */
static int
resync_post_get_progress_params(struct mlx5e_icosq *sq,
				struct mlx5e_ktls_offload_context_rx *priv_rx)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	struct mlx5e_ktls_rx_resync_buf *buf;
	struct mlx5e_icosq_wqe_info wi;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_seg_get_psv *psv;
	struct device *pdev;
	int err;
	u16 pi;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (unlikely(!buf)) {
		err = -ENOMEM;
		goto err_out;
	}

	pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
	buf->dma_addr = dma_map_single(pdev, &buf->progress,
				       PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
		err = -ENOMEM;
		goto err_free;
	}

	buf->priv_rx = priv_rx;

	spin_lock_bh(&sq->channel->async_icosq_lock);

	if (unlikely(!mlx5e_icosq_can_post_wqe(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS))) {
		spin_unlock_bh(&sq->channel->async_icosq_lock);
		err = -ENOSPC;
		goto err_dma_unmap;
	}

	pi = mlx5e_icosq_get_next_pi(sq, MLX5E_KTLS_GET_PROGRESS_WQEBBS);
	wqe = MLX5E_TLS_FETCH_GET_PROGRESS_PARAMS_WQE(sq, pi);

#define GET_PSV_DS_CNT (DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS))

	cseg = &wqe->ctrl;
	cseg->opmod_idx_opcode =
		cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_GET_PSV |
			    (MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS << 24));
	cseg->qpn_ds =
		cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | GET_PSV_DS_CNT);

	psv = &wqe->psv;
	psv->num_psv = 1 << 4;
	psv->l_key = sq->channel->mkey_be;
	psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir));
	psv->va = cpu_to_be64(buf->dma_addr);

	wi = (struct mlx5e_icosq_wqe_info) {
		.wqe_type = MLX5E_ICOSQ_WQE_GET_PSV_TLS,
		.num_wqebbs = MLX5E_KTLS_GET_PROGRESS_WQEBBS,
		.tls_get_params.buf = buf,
	};
	icosq_fill_wi(sq, pi, &wi);
	sq->pc++;
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
	spin_unlock_bh(&sq->channel->async_icosq_lock);

	return 0;

err_dma_unmap:
	dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
err_free:
	kfree(buf);
err_out:
	priv_rx->rq_stats->tls_resync_req_skip++;
	return err;
}

/* Function is called with elevated refcount.
 * It decreases it only if no WQE is posted.
 */
static void resync_handle_work(struct work_struct *work)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_channel *c;
	struct mlx5e_icosq *sq;

	resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work);
	priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		mlx5e_ktls_priv_rx_put(priv_rx);
		return;
	}

	c = resync->priv->channels.c[priv_rx->rxq];
	sq = &c->async_icosq;

	if (resync_post_get_progress_params(sq, priv_rx))
		mlx5e_ktls_priv_rx_put(priv_rx);
}

static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
			struct mlx5e_priv *priv)
{
	INIT_WORK(&resync->work, resync_handle_work);
	resync->priv = priv;
	refcount_set(&resync->refcnt, 1);
}
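
/* On a sequence-number match reported by the TLS stack, the new record
 * sequence is copied into the cached crypto_info and the context is added
 * to the per-channel resync response list; the actual static params WQE is
 * re-posted later, in NAPI.
 */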
/* Function can be called with the refcount being either elevated or not.
 * It does not affect the refcount.
 */
static void resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx,
				    struct mlx5e_channel *c)
{
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5e_icosq *sq;
	bool trigger_poll;

	sq = &c->async_icosq;
	ktls_resync = sq->ktls_resync;
	trigger_poll = false;

	spin_lock_bh(&ktls_resync->lock);
	spin_lock_bh(&priv_rx->lock);
	switch (priv_rx->crypto_info.crypto_info.cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *info =
			&priv_rx->crypto_info.crypto_info_128;

		memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
		       sizeof(info->rec_seq));
		break;
	}
	case TLS_CIPHER_AES_GCM_256: {
		struct tls12_crypto_info_aes_gcm_256 *info =
			&priv_rx->crypto_info.crypto_info_256;

		memcpy(info->rec_seq, &priv_rx->resync.sw_rcd_sn_be,
		       sizeof(info->rec_seq));
		break;
	}
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  priv_rx->crypto_info.crypto_info.cipher_type);
		spin_unlock_bh(&priv_rx->lock);
		spin_unlock_bh(&ktls_resync->lock);
		return;
	}

	if (list_empty(&priv_rx->list)) {
		list_add_tail(&priv_rx->list, &ktls_resync->list);
		trigger_poll = !test_and_set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	}
	spin_unlock_bh(&priv_rx->lock);
	spin_unlock_bh(&ktls_resync->lock);

	if (!trigger_poll)
		return;

	if (!napi_if_scheduled_mark_missed(&c->napi)) {
		spin_lock_bh(&c->async_icosq_lock);
		mlx5e_trigger_irq(sq);
		spin_unlock_bh(&c->async_icosq_lock);
	}
}

/* Function can be called with the refcount being either elevated or not.
 * It decreases the refcount and may free the kTLS priv context.
 * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was
 * already in flight.
 */
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
					  struct mlx5e_icosq *sq)
{
	struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	u8 tracker_state, auth_state, *ctx;
	struct device *dev;
	u32 hw_seq;

	priv_rx = buf->priv_rx;
	resync = &priv_rx->resync;
	dev = mlx5_core_dma_dev(resync->priv->mdev);
	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		goto out;

	dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
				DMA_FROM_DEVICE);

	ctx = buf->progress.ctx;
	tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state);
	auth_state = MLX5_GET(tls_progress_params, ctx, auth_state);
	if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
	    auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
		priv_rx->rq_stats->tls_resync_req_skip++;
		goto out;
	}

	hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
	tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
	priv_rx->rq_stats->tls_resync_req_end++;
out:
	mlx5e_ktls_priv_rx_put(priv_rx);
	dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
	kfree(buf);
}
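
/* NAPI-side entry points of the resync flow: the GET_PSV work is queued
 * with an elevated priv_rx refcount, guaranteeing the context outlives the
 * work whether it runs or gets cancelled.
 */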
/* Runs in NAPI.
 * Function elevates the refcount, unless no work is queued.
 */
static bool resync_queue_get_psv(struct sock *sk)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return false;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
		return false;

	resync = &priv_rx->resync;
	mlx5e_ktls_priv_rx_get(priv_rx);
	if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
		mlx5e_ktls_priv_rx_put(priv_rx);

	return true;
}

/* Runs in NAPI */
static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
	struct ethhdr *eth = (struct ethhdr *)(skb->data);
	struct net_device *netdev = rq->netdev;
	struct net *net = dev_net(netdev);
	struct sock *sk = NULL;
	unsigned int datalen;
	struct iphdr *iph;
	struct tcphdr *th;
	__be32 seq;
	int depth = 0;

	__vlan_get_protocol(skb, eth->h_proto, &depth);
	iph = (struct iphdr *)(skb->data + depth);

	if (iph->version == 4) {
		depth += sizeof(struct iphdr);
		th = (void *)iph + sizeof(struct iphdr);

		sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					     iph->saddr, th->source, iph->daddr,
					     th->dest, netdev->ifindex);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		struct ipv6hdr *ipv6h = (struct ipv6hdr *)iph;

		depth += sizeof(struct ipv6hdr);
		th = (void *)ipv6h + sizeof(struct ipv6hdr);

		sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
						&ipv6h->saddr, th->source,
						&ipv6h->daddr, ntohs(th->dest),
						netdev->ifindex, 0);
#endif
	}

	depth += sizeof(struct tcphdr);

	if (unlikely(!sk))
		return;

	if (unlikely(sk->sk_state == TCP_TIME_WAIT))
		goto unref;

	if (unlikely(!resync_queue_get_psv(sk)))
		goto unref;

	seq = th->seq;
	datalen = skb->len - depth;
	tls_offload_rx_resync_async_request_start(sk, seq, datalen);
	rq->stats->tls_resync_req_start++;

unref:
	sock_gen_put(sk);
}

void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
			  u32 seq, u8 *rcd_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;
	struct mlx5e_channel *c;

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_get_ctx(sk));
	if (unlikely(!priv_rx))
		return;

	resync = &priv_rx->resync;
	resync->sw_rcd_sn_be = *(__be64 *)rcd_sn;
	resync->seq = seq;

	priv = netdev_priv(netdev);
	c = priv->channels.c[priv_rx->rxq];

	resync_handle_seq_match(priv_rx, c);
}

/* End of resync section */

void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
			      struct mlx5_cqe64 *cqe, u32 *cqe_bcnt)
{
	struct mlx5e_rq_stats *stats = rq->stats;

	switch (get_cqe_tls_offload(cqe)) {
	case CQE_TLS_OFFLOAD_DECRYPTED:
		skb->decrypted = 1;
		stats->tls_decrypted_packets++;
		stats->tls_decrypted_bytes += *cqe_bcnt;
		break;
	case CQE_TLS_OFFLOAD_RESYNC:
		stats->tls_resync_req_pkt++;
		resync_update_sn(rq, skb);
		break;
	default: /* CQE_TLS_OFFLOAD_ERROR: */
		stats->tls_err++;
		break;
	}
}
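
/* The flow steering rule may be installed only after HW has accepted the
 * TLS context, so it is queued from the params-WQE completion below; when
 * the context is being deleted, only the add-flow waiter is released.
 */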
void mlx5e_ktls_handle_ctx_completion(struct mlx5e_icosq_wqe_info *wi)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx = wi->tls_set_params.priv_rx;
	struct accel_rule *rule = &priv_rx->rule;

	if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
		complete(&priv_rx->add_ctx);
		return;
	}
	queue_work(rule->priv->tls->rx_wq, &rule->work);
}

static int mlx5e_ktls_sk_get_rxq(struct sock *sk)
{
	int rxq = sk_rx_queue_get(sk);

	if (unlikely(rxq == -1))
		rxq = 0;

	return rxq;
}

int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
		      struct tls_crypto_info *crypto_info,
		      u32 start_offload_tcp_sn)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct tls_context *tls_ctx;
	struct mlx5_crypto_dek *dek;
	struct mlx5e_priv *priv;
	int rxq, err;

	tls_ctx = tls_get_ctx(sk);
	priv = netdev_priv(netdev);
	priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
	if (unlikely(!priv_rx))
		return -ENOMEM;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128:
		priv_rx->crypto_info.crypto_info_128 =
			*(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
		break;
	case TLS_CIPHER_AES_GCM_256:
		priv_rx->crypto_info.crypto_info_256 =
			*(struct tls12_crypto_info_aes_gcm_256 *)crypto_info;
		break;
	default:
		WARN_ONCE(1, "Unsupported cipher type %u\n",
			  crypto_info->cipher_type);
		err = -EOPNOTSUPP;
		goto err_cipher_type;
	}

	dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
	if (IS_ERR(dek)) {
		err = PTR_ERR(dek);
		goto err_cipher_type;
	}
	priv_rx->dek = dek;

	INIT_LIST_HEAD(&priv_rx->list);
	spin_lock_init(&priv_rx->lock);

	rxq = mlx5e_ktls_sk_get_rxq(sk);
	priv_rx->rxq = rxq;
	priv_rx->sk = sk;

	priv_rx->rq_stats = &priv->channel_stats[rxq]->rq;
	priv_rx->sw_stats = &priv->tls->sw_stats;
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

	err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
	if (err)
		goto err_create_tir;

	init_completion(&priv_rx->add_ctx);

	accel_rule_init(&priv_rx->rule, priv);
	resync = &priv_rx->resync;
	resync_init(resync, priv);
	tls_offload_ctx_rx(tls_ctx)->resync_async = &resync->core;
	tls_offload_rx_resync_set_type(sk, TLS_OFFLOAD_SYNC_TYPE_DRIVER_REQ_ASYNC);

	err = post_rx_param_wqes(priv->channels.c[rxq], priv_rx, start_offload_tcp_sn);
	if (err)
		goto err_post_wqes;

	atomic64_inc(&priv_rx->sw_stats->rx_tls_ctx);

	return 0;

err_post_wqes:
	mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
	mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
err_cipher_type:
	kfree(priv_rx);
	return err;
}

void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx;
	struct mlx5e_ktls_rx_resync_ctx *resync;
	struct mlx5e_priv *priv;

	priv = netdev_priv(netdev);

	priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
	set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
	synchronize_net(); /* Sync with NAPI */
	if (!cancel_work_sync(&priv_rx->rule.work))
		/* completion is needed, as the priv_rx in the add flow
		 * is maintained on the wqe info (wi), not on the socket.
		 */
		wait_for_completion(&priv_rx->add_ctx);
	resync = &priv_rx->resync;
	if (cancel_work_sync(&resync->work))
		mlx5e_ktls_priv_rx_put(priv_rx);

	atomic64_inc(&priv_rx->sw_stats->rx_tls_del);
	if (priv_rx->rule.rule)
		mlx5e_accel_fs_del_sk(priv_rx->rule.rule);

	mlx5e_tir_destroy(&priv_rx->tir);
	mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
	/* priv_rx should normally be freed here, but if there is an outstanding
	 * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
	 * processed.
	 */
	mlx5e_ktls_priv_rx_put(priv_rx);
}
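
/* Drain the per-channel resync response list in NAPI: re-post static params
 * WQEs (carrying the updated record state) for up to @budget contexts, then
 * ring the doorbell once for the whole batch. Contexts that do not fit in
 * the ICOSQ are spliced back onto the list and retried on a later cycle,
 * driven by the outstanding ICOSQ completions.
 */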
bool mlx5e_ktls_rx_handle_resync_list(struct mlx5e_channel *c, int budget)
{
	struct mlx5e_ktls_offload_context_rx *priv_rx, *tmp;
	struct mlx5e_ktls_resync_resp *ktls_resync;
	struct mlx5_wqe_ctrl_seg *db_cseg;
	struct mlx5e_icosq *sq;
	LIST_HEAD(local_list);
	int i, j;

	sq = &c->async_icosq;

	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
		return false;

	ktls_resync = sq->ktls_resync;
	db_cseg = NULL;
	i = 0;

	spin_lock(&ktls_resync->lock);
	list_for_each_entry_safe(priv_rx, tmp, &ktls_resync->list, list) {
		list_move(&priv_rx->list, &local_list);
		if (++i == budget)
			break;
	}
	if (list_empty(&ktls_resync->list))
		clear_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
	spin_unlock(&ktls_resync->lock);

	spin_lock(&c->async_icosq_lock);
	for (j = 0; j < i; j++) {
		struct mlx5_wqe_ctrl_seg *cseg;

		priv_rx = list_first_entry(&local_list,
					   struct mlx5e_ktls_offload_context_rx,
					   list);
		spin_lock(&priv_rx->lock);
		cseg = post_static_params(sq, priv_rx);
		if (IS_ERR(cseg)) {
			spin_unlock(&priv_rx->lock);
			break;
		}
		list_del_init(&priv_rx->list);
		spin_unlock(&priv_rx->lock);
		db_cseg = cseg;
	}
	if (db_cseg)
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, db_cseg);
	spin_unlock(&c->async_icosq_lock);

	priv_rx->rq_stats->tls_resync_res_ok += j;

	if (!list_empty(&local_list)) {
		/* This happens only if ICOSQ is full.
		 * There is no need to mark busy or explicitly ask for a NAPI cycle,
		 * it will be triggered by the outstanding ICOSQ completions.
		 */
		spin_lock(&ktls_resync->lock);
		list_splice(&local_list, &ktls_resync->list);
		set_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &sq->state);
		spin_unlock(&ktls_resync->lock);
		priv_rx->rq_stats->tls_resync_res_retry++;
	}

	return i == budget;
}