// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

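/* .tls_dev_add callback: verify that the device can offload the requested
 * cipher, then install a TX or RX kTLS offload context for the socket,
 * starting at the given TCP sequence number.
 */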
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
			  enum tls_offload_ctx_dir direction,
			  struct tls_crypto_info *crypto_info,
			  u32 start_offload_tcp_sn)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
		return -EOPNOTSUPP;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		err = mlx5e_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn);
	else
		err = mlx5e_ktls_add_rx(netdev, sk, crypto_info, start_offload_tcp_sn);

	return err;
}

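/* .tls_dev_del callback: release the TX or RX offload context that was
 * installed by mlx5e_ktls_add() for this TLS connection.
 */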
static void mlx5e_ktls_del(struct net_device *netdev,
			   struct tls_context *tls_ctx,
			   enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		mlx5e_ktls_del_tx(netdev, tls_ctx);
	else
		mlx5e_ktls_del_rx(netdev, tls_ctx);
}

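/* .tls_dev_resync callback: resynchronization is supported only for the RX
 * direction; hand the TCP sequence number and TLS record serial number to
 * the RX resync handler.
 */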
static int mlx5e_ktls_resync(struct net_device *netdev,
			     struct sock *sk, u32 seq, u8 *rcd_sn,
			     enum tls_offload_ctx_dir direction)
{
	if (unlikely(direction != TLS_OFFLOAD_CTX_DIR_RX))
		return -EOPNOTSUPP;

	mlx5e_ktls_rx_resync(netdev, sk, seq, rcd_sn);
	return 0;
}

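/* kTLS hooks exposed to the TLS stack through netdev->tlsdev_ops. */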
static const struct tlsdev_ops mlx5e_ktls_ops = {
	.tls_dev_add = mlx5e_ktls_add,
	.tls_dev_del = mlx5e_ktls_del,
	.tls_dev_resync = mlx5e_ktls_resync,
};

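/* Advertise kTLS offload features according to device capabilities and hook
 * up the tlsdev_ops. TX offload is enabled by default; RX offload is only
 * advertised in hw_features and left for the user to enable.
 */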
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (mlx5_accel_is_ktls_tx(mdev)) {
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
		netdev->features    |= NETIF_F_HW_TLS_TX;
	}

	if (mlx5_accel_is_ktls_rx(mdev))
		netdev->hw_features |= NETIF_F_HW_TLS_RX;

	netdev->tlsdev_ops = &mlx5e_ktls_ops;
}

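/* Handle toggling of NETIF_F_HW_TLS_RX: create or destroy the accel TCP
 * flow steering tables used by kTLS RX, under the priv state lock.
 */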
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);
	if (enable)
		err = mlx5e_accel_fs_tcp_create(priv);
	else
		mlx5e_accel_fs_tcp_destroy(priv);
	mutex_unlock(&priv->state_lock);

	return err;
}

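/* Allocate the workqueue used by the kTLS RX path and, if RX offload is
 * currently enabled on the netdev, create the accel TCP flow steering
 * tables it depends on.
 */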
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
	int err;

	if (!mlx5_accel_is_ktls_rx(priv->mdev))
		return 0;

	priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
	if (!priv->tls->rx_wq)
		return -ENOMEM;

	if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
		err = mlx5e_accel_fs_tcp_create(priv);
		if (err) {
			destroy_workqueue(priv->tls->rx_wq);
			return err;
		}
	}

	return 0;
}

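/* Reverse of mlx5e_ktls_init_rx(): destroy the accel TCP flow steering
 * tables (if RX offload is enabled) and the kTLS RX workqueue.
 */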
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
	if (!mlx5_accel_is_ktls_rx(priv->mdev))
		return;

	if (priv->netdev->features & NETIF_F_HW_TLS_RX)
		mlx5e_accel_fs_tcp_destroy(priv);

	destroy_workqueue(priv->tls->rx_wq);
}