// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.

#include "en.h"
#include "en_accel/tls.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"

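/* Install a kTLS offload context for the socket, dispatching to the TX
 * or RX handler according to the offload direction. Invoked by the TLS
 * stack through the .tls_dev_add callback.
 */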
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
			  enum tls_offload_ctx_dir direction,
			  struct tls_crypto_info *crypto_info,
			  u32 start_offload_tcp_sn)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	if (WARN_ON(!mlx5e_ktls_type_check(mdev, crypto_info)))
		return -EOPNOTSUPP;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		err = mlx5e_ktls_add_tx(netdev, sk, crypto_info, start_offload_tcp_sn);
	else
		err = mlx5e_ktls_add_rx(netdev, sk, crypto_info, start_offload_tcp_sn);

	return err;
}

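/* Release the offload context previously installed for this direction,
 * dispatching to the matching TX or RX teardown handler.
 */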
static void mlx5e_ktls_del(struct net_device *netdev,
			   struct tls_context *tls_ctx,
			   enum tls_offload_ctx_dir direction)
{
	if (direction == TLS_OFFLOAD_CTX_DIR_TX)
		mlx5e_ktls_del_tx(netdev, tls_ctx);
	else
		mlx5e_ktls_del_rx(netdev, tls_ctx);
}

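/* Resynchronize the device RX context with the TCP sequence number and
 * TLS record number supplied by the stack. Only the RX direction is
 * resynced through this callback; anything else is rejected.
 */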
static int mlx5e_ktls_resync(struct net_device *netdev,
			     struct sock *sk, u32 seq, u8 *rcd_sn,
			     enum tls_offload_ctx_dir direction)
{
	if (unlikely(direction != TLS_OFFLOAD_CTX_DIR_RX))
		return -EOPNOTSUPP;

	mlx5e_ktls_rx_resync(netdev, sk, seq, rcd_sn);
	return 0;
}

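/* TLS device offload callbacks registered on the netdev. */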
static const struct tlsdev_ops mlx5e_ktls_ops = {
	.tls_dev_add = mlx5e_ktls_add,
	.tls_dev_del = mlx5e_ktls_del,
	.tls_dev_resync = mlx5e_ktls_resync,
};

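/* Advertise kTLS offload features according to device capabilities and
 * hook up the tlsdev_ops. TX offload is enabled by default; RX offload
 * is only added to hw_features, so it stays off until the feature bit
 * is turned on.
 */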
void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev))
		return;

	if (mlx5e_accel_is_ktls_tx(mdev)) {
		netdev->hw_features |= NETIF_F_HW_TLS_TX;
		netdev->features    |= NETIF_F_HW_TLS_TX;
	}

	if (mlx5e_accel_is_ktls_rx(mdev))
		netdev->hw_features |= NETIF_F_HW_TLS_RX;

	netdev->tlsdev_ops = &mlx5e_ktls_ops;
}

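/* Toggle handler for NETIF_F_HW_TLS_RX: the accel TCP flow steering
 * tables are needed only while RX offload is enabled, so create or
 * destroy them under the state lock when the feature changes.
 */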
int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);
	if (enable)
		err = mlx5e_accel_fs_tcp_create(priv);
	else
		mlx5e_accel_fs_tcp_destroy(priv);
	mutex_unlock(&priv->state_lock);

	return err;
}

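/* Set up RX-side kTLS resources: a dedicated workqueue for RX resync
 * work and, if RX offload is currently enabled, the accel TCP flow
 * steering tables.
 */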
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
	int err;

	if (!mlx5e_accel_is_ktls_rx(priv->mdev))
		return 0;

	priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
	if (!priv->tls->rx_wq)
		return -ENOMEM;

	if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
		err = mlx5e_accel_fs_tcp_create(priv);
		if (err) {
			destroy_workqueue(priv->tls->rx_wq);
			return err;
		}
	}

	return 0;
}

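/* Tear down the RX-side kTLS resources created by mlx5e_ktls_init_rx(). */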
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
	if (!mlx5e_accel_is_ktls_rx(priv->mdev))
		return;

	if (priv->netdev->features & NETIF_F_HW_TLS_RX)
		mlx5e_accel_fs_tcp_destroy(priv);

	destroy_workqueue(priv->tls->rx_wq);
}