// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)

#include "funeth.h"
#include "funeth_ktls.h"

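/* Issue a KTLS create admin command to allocate the kTLS resource with the
 * given id on the device.
 */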
static int fun_admin_ktls_create(struct funeth_priv *fp, unsigned int id)
{
	struct fun_admin_ktls_create_req req = {
		.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
						     sizeof(req)),
		.subop = FUN_ADMIN_SUBOP_CREATE,
		.id = cpu_to_be32(id),
	};

	return fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
}

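/* tls_dev_add handler: install the Tx crypto state for a newly offloaded
 * connection. Only Tx offload of TLS 1.2 AES-GCM-128 is supported. On
 * success the TLS connection id returned by the device and the next TCP
 * sequence number to be transmitted are recorded in the connection's
 * driver context.
 */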
static int fun_ktls_add(struct net_device *netdev, struct sock *sk,
			enum tls_offload_ctx_dir direction,
			struct tls_crypto_info *crypto_info,
			u32 start_offload_tcp_sn)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct fun_admin_ktls_modify_req req = {
		.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
						     sizeof(req)),
		.subop = FUN_ADMIN_SUBOP_MODIFY,
		.id = cpu_to_be32(fp->ktls_id),
		.tcp_seq = cpu_to_be32(start_offload_tcp_sn),
	};
	struct fun_admin_ktls_modify_rsp rsp;
	struct fun_ktls_tx_ctx *tx_ctx;
	int rc;

	if (direction != TLS_OFFLOAD_CTX_DIR_TX)
		return -EOPNOTSUPP;

	if (crypto_info->version == TLS_1_2_VERSION)
		req.version = FUN_KTLS_TLSV2;
	else
		return -EOPNOTSUPP;

	switch (crypto_info->cipher_type) {
	case TLS_CIPHER_AES_GCM_128: {
		struct tls12_crypto_info_aes_gcm_128 *c = (void *)crypto_info;

		req.cipher = FUN_KTLS_CIPHER_AES_GCM_128;
		memcpy(req.key, c->key, sizeof(c->key));
		memcpy(req.iv, c->iv, sizeof(c->iv));
		memcpy(req.salt, c->salt, sizeof(c->salt));
		memcpy(req.record_seq, c->rec_seq, sizeof(c->rec_seq));
		break;
	}
	default:
		return -EOPNOTSUPP;
	}

	rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, &rsp,
				       sizeof(rsp), 0);
	/* don't leave key material lying around on the stack */
	memzero_explicit(&req, sizeof(req));
	if (rc)
		return rc;

	tx_ctx = tls_driver_ctx(sk, direction);
	tx_ctx->tlsid = rsp.tlsid;
	tx_ctx->next_seq = start_offload_tcp_sn;
	atomic64_inc(&fp->tx_tls_add);
	return 0;
}

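/* tls_dev_del handler: tell the device to remove the connection's Tx kTLS
 * state. Only the request fields up to tcp_seq are sent.
 */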
static void fun_ktls_del(struct net_device *netdev,
			 struct tls_context *tls_ctx,
			 enum tls_offload_ctx_dir direction)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct fun_admin_ktls_modify_req req;
	struct fun_ktls_tx_ctx *tx_ctx;

	if (direction != TLS_OFFLOAD_CTX_DIR_TX)
		return;

	tx_ctx = __tls_driver_ctx(tls_ctx, direction);

	req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
			offsetof(struct fun_admin_ktls_modify_req, tcp_seq));
	req.subop = FUN_ADMIN_SUBOP_MODIFY;
	req.flags = cpu_to_be16(FUN_KTLS_MODIFY_REMOVE);
	req.id = cpu_to_be32(fp->ktls_id);
	req.tlsid = tx_ctx->tlsid;

	fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
	atomic64_inc(&fp->tx_tls_del);
}

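/* tls_dev_resync handler: resynchronize the device's Tx crypto state by
 * supplying the TCP sequence number and TLS record sequence number of the
 * next record. The request is truncated before the key, so the key and IV
 * are left unchanged.
 */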
static int fun_ktls_resync(struct net_device *netdev, struct sock *sk, u32 seq,
			   u8 *rcd_sn, enum tls_offload_ctx_dir direction)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	struct fun_admin_ktls_modify_req req;
	struct fun_ktls_tx_ctx *tx_ctx;
	int rc;

	if (direction != TLS_OFFLOAD_CTX_DIR_TX)
		return -EOPNOTSUPP;

	tx_ctx = tls_driver_ctx(sk, direction);

	req.common = FUN_ADMIN_REQ_COMMON_INIT2(FUN_ADMIN_OP_KTLS,
			offsetof(struct fun_admin_ktls_modify_req, key));
	req.subop = FUN_ADMIN_SUBOP_MODIFY;
	req.flags = 0;
	req.id = cpu_to_be32(fp->ktls_id);
	req.tlsid = tx_ctx->tlsid;
	req.tcp_seq = cpu_to_be32(seq);
	req.version = 0;
	req.cipher = 0;
	memcpy(req.record_seq, rcd_sn, sizeof(req.record_seq));

	atomic64_inc(&fp->tx_tls_resync);
	rc = fun_submit_admin_sync_cmd(fp->fdev, &req.common, NULL, 0, 0);
	if (!rc)
		tx_ctx->next_seq = seq;
	return rc;
}

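/* kTLS handlers exposed to the TLS offload core; Tx offload only. */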
static const struct tlsdev_ops fun_ktls_ops = {
	.tls_dev_add = fun_ktls_add,
	.tls_dev_del = fun_ktls_del,
	.tls_dev_resync = fun_ktls_resync,
};

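/* Create the device kTLS resource for this netdevice, using its dev_port as
 * the resource id, and advertise Tx kTLS offload.
 */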
int fun_ktls_init(struct net_device *netdev)
{
	struct funeth_priv *fp = netdev_priv(netdev);
	int rc;

	rc = fun_admin_ktls_create(fp, netdev->dev_port);
	if (rc)
		return rc;

	fp->ktls_id = netdev->dev_port;
	netdev->tlsdev_ops = &fun_ktls_ops;
	netdev->hw_features |= NETIF_F_HW_TLS_TX;
	netdev->features |= NETIF_F_HW_TLS_TX;
	return 0;
}

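/* Release the kTLS resource set up by fun_ktls_init(), if any. */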
void fun_ktls_cleanup(struct funeth_priv *fp)
{
	if (fp->ktls_id == FUN_HCI_ID_INVALID)
		return;

	fun_res_destroy(fp->fdev, FUN_ADMIN_OP_KTLS, 0, fp->ktls_id);
	fp->ktls_id = FUN_HCI_ID_INVALID;
}