/*
 * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _TLS_INT_H
#define _TLS_INT_H

#include <asm/byteorder.h>
#include <linux/types.h>
#include <linux/skmsg.h>
#include <net/tls.h>

#define TLS_PAGE_ORDER	(min_t(unsigned int, PAGE_ALLOC_COSTLY_ORDER,	\
			       TLS_MAX_PAYLOAD_SIZE >> PAGE_SHIFT))

#define __TLS_INC_STATS(net, field)				\
	__SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_INC_STATS(net, field)				\
	SNMP_INC_STATS((net)->mib.tls_statistics, field)
#define TLS_DEC_STATS(net, field)				\
	SNMP_DEC_STATS((net)->mib.tls_statistics, field)

/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
 * allocated or mapped for each TLS record. After encryption, the records are
 * stored in a linked list.
 */
struct tls_rec {
	struct list_head list;
	int tx_ready;
	int tx_flags;

	struct sk_msg msg_plaintext;
	struct sk_msg msg_encrypted;

	/* AAD | msg_plaintext.sg.data | sg_tag */
	struct scatterlist sg_aead_in[2];
	/* AAD | msg_encrypted.sg.data (data contains overhead for hdr & iv & tag) */
	struct scatterlist sg_aead_out[2];

	char content_type;
	struct scatterlist sg_content_type;

	char aad_space[TLS_AAD_SPACE_SIZE];
	u8 iv_data[MAX_IV_SIZE];
	struct aead_request aead_req;
	u8 aead_req_ctx[];
};

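/* Illustrative sketch of the two AEAD scatterlist pairs above (the actual
 * chaining is an implementation detail of tls_sw.c):
 *
 *	sg_aead_in:  [0] aad_space, [1] chained to msg_plaintext.sg.data
 *	sg_aead_out: [0] aad_space, [1] chained to msg_encrypted.sg.data
 *
 * msg_encrypted additionally reserves room for the record header, the
 * explicit IV/nonce and the authentication tag, so one tls_rec owns both
 * the plaintext pages and the ciphertext that ends up queued on the TX
 * list and transmitted by tls_tx_records() (declared below).
 */
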
int __net_init tls_proc_init(struct net *net);
void __net_exit tls_proc_fini(struct net *net);

struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
void update_sk_prot(struct sock *sk, struct tls_context *ctx);

int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
		 int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
		  unsigned int optlen);
void tls_err_abort(struct sock *sk, int err);

int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
			   int offset, size_t size, int flags);
int tls_sw_sendpage(struct sock *sk, struct page *page,
		    int offset, size_t size, int flags);
void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
void tls_sw_release_resources_tx(struct sock *sk);
void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		   int flags, int *addr_len);
bool tls_sw_sock_is_readable(struct sock *sk);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
			   struct pipe_inode_info *pipe,
			   size_t len, unsigned int flags);

int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
			int offset, size_t size, int flags);
int tls_tx_records(struct sock *sk, int flags);

void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
void tls_device_write_space(struct sock *sk, struct tls_context *ctx);

int tls_process_cmsg(struct sock *sk, struct msghdr *msg,
		     unsigned char *record_type);
int decrypt_skb(struct sock *sk, struct scatterlist *sgout);

int tls_sw_fallback_init(struct sock *sk,
			 struct tls_offload_context_tx *offload_ctx,
			 struct tls_crypto_info *crypto_info);

int tls_strp_msg_hold(struct sock *sk, struct sk_buff *skb,
		      struct sk_buff_head *dst);

static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
	struct sk_skb_cb *scb = (struct sk_skb_cb *)skb->cb;

	return &scb->tls;
}

static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)
{
	return ctx->recv_pkt;
}

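/* Minimal usage sketch for the two accessors above (hypothetical caller on
 * the RX path; the real users live in tls_sw.c):
 *
 *	struct sk_buff *skb = tls_strp_msg(sw_ctx_rx);
 *	struct tls_msg *tlm = tls_msg(skb);
 *
 *	if (tlm->control != TLS_RECORD_TYPE_DATA)
 *		// route the record through the control-message path
 *
 * tls_msg() aliases the skb control block, so the returned pointer is only
 * valid while the skb is owned by the TLS layer.
 */
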
#ifdef CONFIG_TLS_DEVICE
int tls_device_init(void);
void tls_device_cleanup(void);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx);
#else
static inline int tls_device_init(void) { return 0; }
static inline void tls_device_cleanup(void) {}

static inline int
tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_free_resources_tx(struct sock *sk) {}

static inline int
tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
{
	return -EOPNOTSUPP;
}

static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}

static inline int
tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
{
	return 0;
}
#endif

int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		struct scatterlist *sg, u16 first_offset,
		int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags);
void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);

static inline bool tls_is_partially_sent_record(struct tls_context *ctx)
{
	return !!ctx->partially_sent_record;
}

static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
{
	return tls_ctx->pending_open_record_frags;
}

static inline bool tls_bigint_increment(unsigned char *seq, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		++seq[i];
		if (seq[i] != 0)
			break;
	}

	return (i == -1);
}

static inline void tls_bigint_subtract(unsigned char *seq, int n)
{
	u64 rcd_sn;
	__be64 *p;

	BUILD_BUG_ON(TLS_MAX_REC_SEQ_SIZE != 8);

	p = (__be64 *)seq;
	rcd_sn = be64_to_cpu(*p);
	*p = cpu_to_be64(rcd_sn - n);
}

static inline void
tls_advance_record_sn(struct sock *sk, struct tls_prot_info *prot,
		      struct cipher_context *ctx)
{
	if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
		tls_err_abort(sk, -EBADMSG);

	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		tls_bigint_increment(ctx->iv + prot->salt_size,
				     prot->iv_size);
}

static inline void
tls_xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
{
	int i;

	if (prot->version == TLS_1_3_VERSION ||
	    prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
		for (i = 0; i < 8; i++)
			iv[i + 4] ^= seq[i];
	}
}

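/* tls_fill_prepend() below writes the on-the-wire record header and, for
 * TLS 1.2 AES-GCM/CCM, the explicit nonce that follows it. Worked example
 * (an assumption for illustration): TLS 1.2 AES-GCM-128 with its 8-byte
 * explicit IV and 16-byte tag, and a 100-byte plaintext:
 *
 *	buf[0]     record type (application data, alert, ...)
 *	buf[1..2]  0x03 0x03 - legacy record version, also used for TLS 1.3
 *	buf[3..4]  0x00 0x7c - length = 100 + 8 (explicit IV) + 16 (tag)
 *	buf[5..12] explicit nonce copied from ctx->tx.iv past the salt
 *
 * TLS 1.3 and ChaCha20-Poly1305 records carry no explicit nonce; for
 * TLS 1.3 the real content type is additionally hidden in the payload, so
 * buf[0] is always TLS_RECORD_TYPE_DATA there.
 */
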
static inline void
tls_fill_prepend(struct tls_context *ctx, char *buf, size_t plaintext_len,
		 unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	size_t pkt_len, iv_size = prot->iv_size;

	pkt_len = plaintext_len + prot->tag_size;
	if (prot->version != TLS_1_3_VERSION &&
	    prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
		pkt_len += iv_size;

		memcpy(buf + TLS_NONCE_OFFSET,
		       ctx->tx.iv + prot->salt_size, iv_size);
	}

	/* we cover nonce explicit here as well, so buf should be of
	 * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
	 */
	buf[0] = prot->version == TLS_1_3_VERSION ?
		   TLS_RECORD_TYPE_DATA : record_type;
	/* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
	buf[1] = TLS_1_2_VERSION_MINOR;
	buf[2] = TLS_1_2_VERSION_MAJOR;
	/* we can use IV for nonce explicit according to spec */
	buf[3] = pkt_len >> 8;
	buf[4] = pkt_len & 0xFF;
}

static inline
void tls_make_aad(char *buf, size_t size, char *record_sequence,
		  unsigned char record_type, struct tls_prot_info *prot)
{
	if (prot->version != TLS_1_3_VERSION) {
		memcpy(buf, record_sequence, prot->rec_seq_size);
		buf += 8;
	} else {
		size += prot->tag_size;
	}

	buf[0] = prot->version == TLS_1_3_VERSION ?
		  TLS_RECORD_TYPE_DATA : record_type;
	buf[1] = TLS_1_2_VERSION_MAJOR;
	buf[2] = TLS_1_2_VERSION_MINOR;
	buf[3] = size >> 8;
	buf[4] = size & 0xFF;
}

#endif