/*
 * FILS AEAD for (Re)Association Request/Response frames
 * Copyright 2016, Qualcomm Atheros, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>

#include "ieee80211_i.h"
#include "aes_cmac.h"
#include "fils_aead.h"

static void gf_mulx(u8 *pad)
{
	u64 a = get_unaligned_be64(pad);
	u64 b = get_unaligned_be64(pad + 8);

	put_unaligned_be64((a << 1) | (b >> 63), pad);
	put_unaligned_be64((b << 1) ^ ((a >> 63) ? 0x87 : 0), pad + 8);
}

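/*
 * S2V ("string to vector") construction from RFC 5297 (AES-SIV):
 *
 *   D = AES-CMAC(K, <zero>)
 *   for i = 1 .. n-1: D = dbl(D) xor AES-CMAC(K, Si)
 *   T = Sn xorend D           if len(Sn) >= 128 bits
 *   T = dbl(D) xor pad(Sn)    otherwise
 *   V = AES-CMAC(K, T)
 *
 * V serves both as the synthetic IV and as the integrity check value.
 * At least one input vector is assumed (num_elem >= 1); the callers below
 * always append the plaintext/ciphertext as the final vector, so the
 * RFC 5297 special case for an empty vector list is not needed here.
 */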
static int aes_s2v(struct crypto_shash *tfm,
		   size_t num_elem, const u8 *addr[], size_t len[], u8 *v)
{
	u8 d[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE] = {};
	SHASH_DESC_ON_STACK(desc, tfm);
	size_t i;

	desc->tfm = tfm;

	/* D = AES-CMAC(K, <zero>) */
	crypto_shash_digest(desc, tmp, AES_BLOCK_SIZE, d);

	for (i = 0; i < num_elem - 1; i++) {
		/* D = dbl(D) xor AES_CMAC(K, Si) */
		gf_mulx(d); /* dbl */
		crypto_shash_digest(desc, addr[i], len[i], tmp);
		crypto_xor(d, tmp, AES_BLOCK_SIZE);
	}

	crypto_shash_init(desc);

	if (len[i] >= AES_BLOCK_SIZE) {
		/* len(Sn) >= 128 */
		/* T = Sn xorend D */
		crypto_shash_update(desc, addr[i], len[i] - AES_BLOCK_SIZE);
		crypto_xor(d, addr[i] + len[i] - AES_BLOCK_SIZE,
			   AES_BLOCK_SIZE);
	} else {
		/* len(Sn) < 128 */
		/* T = dbl(D) xor pad(Sn) */
		gf_mulx(d); /* dbl */
		crypto_xor(d, addr[i], len[i]);
		d[len[i]] ^= 0x80;
	}
	/* V = AES-CMAC(K, T) */
	crypto_shash_finup(desc, d, AES_BLOCK_SIZE, v);

	return 0;
}

/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_encrypt(const u8 *key, size_t key_len,
			   const u8 *plain, size_t plain_len,
			   size_t num_elem, const u8 *addr[],
			   size_t len[], u8 *out)
{
	u8 v[AES_BLOCK_SIZE];
	struct crypto_shash *tfm;
	struct crypto_skcipher *tfm2;
	struct skcipher_request *req;
	int res;
	struct scatterlist src[1], dst[1];
	u8 *tmp;

	key_len /= 2; /* S2V key || CTR key */

	addr[num_elem] = plain;
	len[num_elem] = plain_len;
	num_elem++;

	/* S2V */

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* K1 for S2V */
	res = crypto_shash_setkey(tfm, key, key_len);
	if (!res)
		res = aes_s2v(tfm, num_elem, addr, len, v);
	crypto_free_shash(tfm);
	if (res)
		return res;

	/* Take a copy of the plaintext since the callers may encrypt in
	 * place (out overlapping plain) and AES-CTR would then overwrite
	 * data that still needs to be read.
	 */
	tmp = kmemdup(plain, plain_len, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	/* IV for CTR before encrypted data */
	memcpy(out, v, AES_BLOCK_SIZE);

	/* Synthetic IV to be used as the initial counter in CTR:
	 * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
	 */
	v[8] &= 0x7f;
	v[12] &= 0x7f;

	/* CTR */

	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm2)) {
		kfree(tmp);
		return PTR_ERR(tfm2);
	}
	/* K2 for CTR */
	res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
	if (res)
		goto fail;

	req = skcipher_request_alloc(tfm2, GFP_KERNEL);
	if (!req) {
		res = -ENOMEM;
		goto fail;
	}

	sg_init_one(src, tmp, plain_len);
	sg_init_one(dst, out + AES_BLOCK_SIZE, plain_len);
	skcipher_request_set_crypt(req, src, dst, plain_len, v);
	res = crypto_skcipher_encrypt(req);
	skcipher_request_free(req);
fail:
	kfree(tmp);
	crypto_free_skcipher(tfm2);
	return res;
}

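/*
 * AES-SIV (RFC 5297) as used here: the first half of the key is the S2V
 * (CMAC) key K1 and the second half is the AES-CTR key K2. The encrypted
 * output consists of
 *
 *   V (AES_BLOCK_SIZE bytes) || AES-CTR(K2, Q, plaintext)
 *
 * where the initial counter Q is V with the most significant bit of each
 * of the last two 32-bit words cleared. aes_siv_decrypt() below reverses
 * the CTR step and then recomputes S2V over the AAD vectors and the
 * decrypted data; the result has to match the received V or the frame is
 * rejected with -EINVAL.
 */
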
/* Note: addr[] and len[] need to have one extra slot at the end. */
static int aes_siv_decrypt(const u8 *key, size_t key_len,
			   const u8 *iv_crypt, size_t iv_c_len,
			   size_t num_elem, const u8 *addr[], size_t len[],
			   u8 *out)
{
	struct crypto_shash *tfm;
	struct crypto_skcipher *tfm2;
	struct skcipher_request *req;
	struct scatterlist src[1], dst[1];
	size_t crypt_len;
	int res;
	u8 frame_iv[AES_BLOCK_SIZE], iv[AES_BLOCK_SIZE];
	u8 check[AES_BLOCK_SIZE];

	crypt_len = iv_c_len - AES_BLOCK_SIZE;
	key_len /= 2; /* S2V key || CTR key */
	addr[num_elem] = out;
	len[num_elem] = crypt_len;
	num_elem++;

	memcpy(iv, iv_crypt, AES_BLOCK_SIZE);
	memcpy(frame_iv, iv_crypt, AES_BLOCK_SIZE);

	/* Synthetic IV to be used as the initial counter in CTR:
	 * Q = V bitand (1^64 || 0^1 || 1^31 || 0^1 || 1^31)
	 */
	iv[8] &= 0x7f;
	iv[12] &= 0x7f;

	/* CTR */

	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm2))
		return PTR_ERR(tfm2);
	/* K2 for CTR */
	res = crypto_skcipher_setkey(tfm2, key + key_len, key_len);
	if (res) {
		crypto_free_skcipher(tfm2);
		return res;
	}

	req = skcipher_request_alloc(tfm2, GFP_KERNEL);
	if (!req) {
		crypto_free_skcipher(tfm2);
		return -ENOMEM;
	}

	sg_init_one(src, iv_crypt + AES_BLOCK_SIZE, crypt_len);
	sg_init_one(dst, out, crypt_len);
	skcipher_request_set_crypt(req, src, dst, crypt_len, iv);
	res = crypto_skcipher_decrypt(req);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm2);
	if (res)
		return res;

	/* S2V */

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* K1 for S2V */
	res = crypto_shash_setkey(tfm, key, key_len);
	if (!res)
		res = aes_s2v(tfm, num_elem, addr, len, check);
	crypto_free_shash(tfm);
	if (res)
		return res;
	if (memcmp(check, frame_iv, AES_BLOCK_SIZE) != 0)
		return -EINVAL;
	return 0;
}

int fils_encrypt_assoc_req(struct sk_buff *skb,
			   struct ieee80211_mgd_assoc_data *assoc_data)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	u8 *capab, *ies, *encr;
	const u8 *addr[5 + 1], *session;
	size_t len[5 + 1];
	size_t crypt_len;

	if (ieee80211_is_reassoc_req(mgmt->frame_control)) {
		capab = (u8 *)&mgmt->u.reassoc_req.capab_info;
		ies = mgmt->u.reassoc_req.variable;
	} else {
		capab = (u8 *)&mgmt->u.assoc_req.capab_info;
		ies = mgmt->u.assoc_req.variable;
	}

	session = cfg80211_find_ext_ie(WLAN_EID_EXT_FILS_SESSION,
				       ies, skb->data + skb->len - ies);
	if (!session || session[1] != 1 + 8)
		return -EINVAL;
	/* encrypt after FILS Session element */
	encr = (u8 *)session + 2 + 1 + 8;

	/* AES-SIV AAD vectors */

	/* The STA's MAC address */
	addr[0] = mgmt->sa;
	len[0] = ETH_ALEN;
	/* The AP's BSSID */
	addr[1] = mgmt->da;
	len[1] = ETH_ALEN;
	/* The STA's nonce */
	addr[2] = assoc_data->fils_nonces;
	len[2] = FILS_NONCE_LEN;
	/* The AP's nonce */
	addr[3] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
	len[3] = FILS_NONCE_LEN;
	/* The (Re)Association Request frame from the Capability Information
	 * field to the FILS Session element (both inclusive).
	 */
	addr[4] = capab;
	len[4] = encr - capab;

	crypt_len = skb->data + skb->len - encr;
	skb_put(skb, AES_BLOCK_SIZE);
	return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
			       encr, crypt_len, 5, addr, len, encr);
}

int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
			    u8 *frame, size_t *frame_len,
			    struct ieee80211_mgd_assoc_data *assoc_data)
{
	struct ieee80211_mgmt *mgmt = (void *)frame;
	u8 *capab, *ies, *encr;
	const u8 *addr[5 + 1], *session;
	size_t len[5 + 1];
	int res;
	size_t crypt_len;

	if (*frame_len < 24 + 6)
		return -EINVAL;

	capab = (u8 *)&mgmt->u.assoc_resp.capab_info;
	ies = mgmt->u.assoc_resp.variable;
	session = cfg80211_find_ext_ie(WLAN_EID_EXT_FILS_SESSION,
				       ies, frame + *frame_len - ies);
	if (!session || session[1] != 1 + 8) {
		mlme_dbg(sdata,
			 "No (valid) FILS Session element in (Re)Association Response frame from %pM",
			 mgmt->sa);
		return -EINVAL;
	}
	/* decrypt after FILS Session element */
	encr = (u8 *)session + 2 + 1 + 8;

	/* AES-SIV AAD vectors */

	/* The AP's BSSID */
	addr[0] = mgmt->sa;
	len[0] = ETH_ALEN;
	/* The STA's MAC address */
	addr[1] = mgmt->da;
	len[1] = ETH_ALEN;
	/* The AP's nonce */
	addr[2] = &assoc_data->fils_nonces[FILS_NONCE_LEN];
	len[2] = FILS_NONCE_LEN;
	/* The STA's nonce */
	addr[3] = assoc_data->fils_nonces;
	len[3] = FILS_NONCE_LEN;
	/* The (Re)Association Response frame from the Capability Information
	 * field to the FILS Session element (both inclusive).
	 */
	addr[4] = capab;
	len[4] = encr - capab;

	crypt_len = frame + *frame_len - encr;
	if (crypt_len < AES_BLOCK_SIZE) {
		mlme_dbg(sdata,
			 "Not enough room for AES-SIV data after FILS Session element in (Re)Association Response frame from %pM",
			 mgmt->sa);
		return -EINVAL;
	}
	res = aes_siv_decrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
			      encr, crypt_len, 5, addr, len, encr);
	if (res != 0) {
		mlme_dbg(sdata,
			 "AES-SIV decryption of (Re)Association Response frame from %pM failed",
			 mgmt->sa);
		return res;
	}
	*frame_len -= AES_BLOCK_SIZE;
	return 0;
}

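/*
 * Note on usage: fils_encrypt_assoc_req() appends AES_BLOCK_SIZE bytes of
 * AES-SIV overhead with skb_put(), so the skb is expected to have been
 * allocated with that much extra tailroom, and fils_decrypt_assoc_resp()
 * decrypts in place and, on success, shrinks *frame_len by AES_BLOCK_SIZE.
 * Both are meant to be called from the MLME association path only when
 * FILS authentication has derived a KEK (assoc_data->fils_kek_len != 0).
 */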