/*
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_INT_H
#define _TLS_INT_H

#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/skmsg.h>
#include <net/tls.h>
#include <net/tls_prot.h>

#define TLS_PAGE_ORDER	(min_t(unsigned int, PAGE_ALLOC_COSTLY_ORDER,	\
			       TLS_MAX_PAYLOAD_SIZE >> PAGE_SHIFT))

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)

struct tls_cipher_desc {
	unsigned int iv;
	unsigned int key;
	unsigned int salt;
	unsigned int tag;
	unsigned int rec_seq;
};

#define TLS_CIPHER_MIN  TLS_CIPHER_AES_GCM_128
#define TLS_CIPHER_MAX  TLS_CIPHER_ARIA_GCM_256
extern const struct tls_cipher_desc tls_cipher_desc[TLS_CIPHER_MAX + 1 - TLS_CIPHER_MIN];

static inline const struct tls_cipher_desc *get_cipher_desc(u16 cipher_type)
{
	if (cipher_type < TLS_CIPHER_MIN || cipher_type > TLS_CIPHER_MAX)
		return NULL;

	return &tls_cipher_desc[cipher_type - TLS_CIPHER_MIN];
}
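
/* Illustrative sketch, not part of the original header: a caller that gets a
 * cipher type from userspace would typically look up its descriptor and bail
 * out when the kernel has none. The helper name and the -EINVAL return value
 * are assumptions made for this example only.
 */
static inline int tls_example_validate_cipher(u16 cipher_type)
{
	const struct tls_cipher_desc *desc = get_cipher_desc(cipher_type);

	/* desc->key, desc->iv, desc->salt and desc->tag hold per-cipher sizes */
	return desc ? 0 : -EINVAL;
}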

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	struct sock *sk;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
void tls_err_abort(struct sock *sk, int err);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
void tls_sw_splice_eof(struct socket *sock);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int flags, int *addr_len);
bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);
int tls_sw_read_sock(struct sock *sk, read_descriptor_t *desc,
		     sk_read_actor_t read_actor);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
void tls_device_splice_eof(struct socket *sock);
int tls_tx_records(struct sock *sk, int flags);

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
		     unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct scatterlist *sgout);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

int tls_strp_dev_init(void);
void tls_strp_dev_exit(void);

void tls_strp_done(struct tls_strparser *strp);
void tls_strp_stop(struct tls_strparser *strp);
int tls_strp_init(struct tls_strparser *strp, struct sock *sk);
void tls_strp_data_ready(struct tls_strparser *strp);

void tls_strp_check_rcv(struct tls_strparser *strp);
void tls_strp_msg_done(struct tls_strparser *strp);

int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb);
void tls_rx_msg_ready(struct tls_strparser *strp);

void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx);
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst);
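
/* Rough sketch of how the sw rx path is expected to pair the strparser
 * helpers above; the real call sites live in tls_strp.c and tls_sw.c, and
 * the ordering shown here is an illustration, not a contract:
 *
 *	tls_strp_msg_load(&ctx->strp, false);	// latch the parsed record
 *	skb = tls_strp_msg(ctx);		// borrow it for decryption
 *	...decrypt / copy out...
 *	tls_strp_msg_done(&ctx->strp);		// release it, look for the next record
 */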

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;

	return &scb->tls;
}

static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)
{
	DEBUG_NET_WARN_ON_ONCE(!ctx->strp.msg_ready || !ctx->strp.anchor->len);
	return ctx->strp.anchor;
}

static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
{
	return ctx->strp.msg_ready;
}

static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
{
	return ctx->strp.mixed_decrypted;
}

#ifdef CONFIG_TLS_DEVICE
int tls_device_init(void);
void tls_device_cleanup(void);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx);
#else
static inline int tls_device_init(void) { return 0; }
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
	return 0;
}
#endif

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}

static inline void
tls_advance_record_sn(struct sock *sk, struct tls_prot_info *prot,
		      struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, -EBADMSG);

	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		tls_bigint_increment(ctx->iv + prot->salt_size,
				     prot->iv_size);
}
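
/* Worked example (illustration only): with an 8-byte rec_seq of
 * 00 00 00 00 00 00 00 ff, tls_bigint_increment() carries into the next
 * byte, leaving 00 00 00 00 00 00 01 00, and returns false. It returns
 * true only when every byte was 0xff and the counter wrapped back to
 * zero, which tls_advance_record_sn() above treats as fatal (-EBADMSG).
 */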

/* For TLS 1.3 and ChaCha20-Poly1305, the per-record nonce is built by
 * XORing the 64-bit record sequence number into the last 8 bytes of the IV.
 */
static inline void
tls_xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
{
	int i;

	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}

/* Write the TLS record header (content type, legacy version, length) in
 * front of the record and, for ciphers with an explicit nonce, copy the
 * per-record IV in right after it.
 */
static inline void
tls_fill_prepend(struct tls_context *ctx, char *buf, size_t plaintext_len,
		 unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + prot->salt_size, iv_size);
	}

	/* we cover nonce explicit here as well, so buf should be of
	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
	 */
	buf[0] = prot->version == TLS_1_3_VERSION ?
		 TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
	buf[1] = TLS_1_2_VERSION_MINOR;
	buf[2] = TLS_1_2_VERSION_MAJOR;
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}

/* Build the AEAD associated data: for TLS 1.2 the record sequence number
 * followed by a pseudo record header, for TLS 1.3 just the record header
 * (whose length covers the authentication tag as well).
 */
static inline
void tls_make_aad(char *buf, size_t size, char *record_sequence,
		  unsigned char record_type, struct tls_prot_info *prot)
{
	if (prot->version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, prot->rec_seq_size);
		buf += 8;
	} else {
		size += prot->tag_size;
	}

	buf[0] = prot->version == TLS_1_3_VERSION ?
		 TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}

#endif