// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/sched/mm.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"

/*
 * Set ->key and ->tfm.  The rest of the key should be filled in before
 * this function is called.
 */
static int set_secret(struct ceph_crypto_key *key, void *buf)
{
        unsigned int noio_flag;
        int ret;

        key->key = NULL;
        key->tfm = NULL;

        switch (key->type) {
        case CEPH_CRYPTO_NONE:
                return 0; /* nothing to do */
        case CEPH_CRYPTO_AES:
                break;
        default:
                return -ENOTSUPP;
        }

        WARN_ON(!key->len);
        key->key = kmemdup(buf, key->len, GFP_NOIO);
        if (!key->key) {
                ret = -ENOMEM;
                goto fail;
        }

        /* crypto_alloc_skcipher() allocates with GFP_KERNEL */
        noio_flag = memalloc_noio_save();
        key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        memalloc_noio_restore(noio_flag);
        if (IS_ERR(key->tfm)) {
                ret = PTR_ERR(key->tfm);
                key->tfm = NULL;
                goto fail;
        }

        ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
        if (ret)
                goto fail;

        return 0;

fail:
        ceph_crypto_key_destroy(key);
        return ret;
}

int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
                          const struct ceph_crypto_key *src)
{
        memcpy(dst, src, sizeof(struct ceph_crypto_key));
        return set_secret(dst, src->key);
}

int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
        if (*p + sizeof(u16) + sizeof(key->created) +
            sizeof(u16) + key->len > end)
                return -ERANGE;
        ceph_encode_16(p, key->type);
        ceph_encode_copy(p, &key->created, sizeof(key->created));
        ceph_encode_16(p, key->len);
        ceph_encode_copy(p, key->key, key->len);
        return 0;
}

int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
        int ret;

        ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
        key->type = ceph_decode_16(p);
        ceph_decode_copy(p, &key->created, sizeof(key->created));
        key->len = ceph_decode_16(p);
        ceph_decode_need(p, end, key->len, bad);
        ret = set_secret(key, *p);
        *p += key->len;
        return ret;

bad:
        dout("failed to decode crypto key\n");
        return -EINVAL;
}

int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
        int inlen = strlen(inkey);
        int blen = inlen * 3 / 4;
        void *buf, *p;
        int ret;

        dout("crypto_key_unarmor %s\n", inkey);
        buf = kmalloc(blen, GFP_NOFS);
        if (!buf)
                return -ENOMEM;
        blen = ceph_unarmor(buf, inkey, inkey+inlen);
        if (blen < 0) {
                kfree(buf);
                return blen;
        }

        p = buf;
        ret = ceph_crypto_key_decode(key, &p, p + blen);
        kfree(buf);
        if (ret)
                return ret;
        dout("crypto_key_unarmor key %p type %d len %d\n", key,
             key->type, key->len);
        return 0;
}

void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
        if (key) {
                kfree(key->key);
                key->key = NULL;
                crypto_free_skcipher(key->tfm);
                key->tfm = NULL;
        }
}

static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

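/*
 * Illustrative sketch of the encoded key layout handled by
 * ceph_crypto_key_encode() and ceph_crypto_key_decode() above (field
 * names here are descriptive only, not declarations from this file):
 *
 *	__le16	type;		CEPH_CRYPTO_NONE or CEPH_CRYPTO_AES
 *	<created>		key->created, copied verbatim
 *	__le16	len;		length of the secret in bytes
 *	u8	secret[len];	raw key material
 */
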
/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
                         const void *buf, unsigned int buf_len)
{
        struct scatterlist *sg;
        const bool is_vmalloc = is_vmalloc_addr(buf);
        unsigned int off = offset_in_page(buf);
        unsigned int chunk_cnt = 1;
        unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
        int i;
        int ret;

        if (buf_len == 0) {
                memset(sgt, 0, sizeof(*sgt));
                return -EINVAL;
        }

        if (is_vmalloc) {
                chunk_cnt = chunk_len >> PAGE_SHIFT;
                chunk_len = PAGE_SIZE;
        }

        if (chunk_cnt > 1) {
                ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
                if (ret)
                        return ret;
        } else {
                WARN_ON(chunk_cnt != 1);
                sg_init_table(prealloc_sg, 1);
                sgt->sgl = prealloc_sg;
                sgt->nents = sgt->orig_nents = 1;
        }

        for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
                struct page *page;
                unsigned int len = min(chunk_len - off, buf_len);

                if (is_vmalloc)
                        page = vmalloc_to_page(buf);
                else
                        page = virt_to_page(buf);

                sg_set_page(sg, page, len, off);

                off = 0;
                buf += len;
                buf_len -= len;
        }
        WARN_ON(buf_len != 0);

        return 0;
}

static void teardown_sgtable(struct sg_table *sgt)
{
        if (sgt->orig_nents > 1)
                sg_free_table(sgt);
}

/*
 * Encrypt or decrypt @in_len bytes of @buf in place with AES-CBC.
 * On encrypt, PKCS#7-style padding is appended first, so @buf_len must
 * leave room for up to AES_BLOCK_SIZE extra bytes; on decrypt, the
 * padding is validated and stripped.  The resulting length is returned
 * via @pout_len.
 */
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
                          void *buf, int buf_len, int in_len, int *pout_len)
{
        SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
        struct sg_table sgt;
        struct scatterlist prealloc_sg;
        char iv[AES_BLOCK_SIZE] __aligned(8);
        int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
        int crypt_len = encrypt ? in_len + pad_byte : in_len;
        int ret;

        WARN_ON(crypt_len > buf_len);
        if (encrypt)
                memset(buf + in_len, pad_byte, pad_byte);
        ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
        if (ret)
                return ret;

        memcpy(iv, aes_iv, AES_BLOCK_SIZE);
        skcipher_request_set_tfm(req, key->tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

        /*
        print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
                       key->key, key->len, 1);
        print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
                       buf, crypt_len, 1);
        */
        if (encrypt)
                ret = crypto_skcipher_encrypt(req);
        else
                ret = crypto_skcipher_decrypt(req);
        skcipher_request_zero(req);
        if (ret) {
                pr_err("%s %scrypt failed: %d\n", __func__,
                       encrypt ? "en" : "de", ret);
                goto out_sgt;
        }
        /*
        print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
                       buf, crypt_len, 1);
        */

        if (encrypt) {
                *pout_len = crypt_len;
        } else {
                pad_byte = *(char *)(buf + in_len - 1);
                if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
                    in_len >= pad_byte) {
                        *pout_len = in_len - pad_byte;
                } else {
                        pr_err("%s got bad padding %d on in_len %d\n",
                               __func__, pad_byte, in_len);
                        ret = -EPERM;
                        goto out_sgt;
                }
        }

out_sgt:
        teardown_sgtable(&sgt);
        return ret;
}

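/*
 * Illustrative sketch of the in-place buffer contract (the buffer and
 * plaintext names below are hypothetical, not taken from a caller in
 * this file): when encrypting, @buf_len must cover the worst-case pad
 * of AES_BLOCK_SIZE bytes that ceph_aes_crypt() appends.
 *
 *	int out_len, ret;
 *	int buf_len = in_len + AES_BLOCK_SIZE;	 (room for the pad)
 *	void *buf = kmalloc(buf_len, GFP_NOFS);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	memcpy(buf, plaintext, in_len);
 *	ret = ceph_crypt(key, true, buf, buf_len, in_len, &out_len);
 *	 (on success, the first out_len bytes of buf hold the ciphertext)
 */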
"en" : "de", ret); 251 goto out_sgt; 252 } 253 /* 254 print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1, 255 buf, crypt_len, 1); 256 */ 257 258 if (encrypt) { 259 *pout_len = crypt_len; 260 } else { 261 pad_byte = *(char *)(buf + in_len - 1); 262 if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE && 263 in_len >= pad_byte) { 264 *pout_len = in_len - pad_byte; 265 } else { 266 pr_err("%s got bad padding %d on in_len %d\n", 267 __func__, pad_byte, in_len); 268 ret = -EPERM; 269 goto out_sgt; 270 } 271 } 272 273 out_sgt: 274 teardown_sgtable(&sgt); 275 return ret; 276 } 277 278 int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt, 279 void *buf, int buf_len, int in_len, int *pout_len) 280 { 281 switch (key->type) { 282 case CEPH_CRYPTO_NONE: 283 *pout_len = in_len; 284 return 0; 285 case CEPH_CRYPTO_AES: 286 return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len, 287 pout_len); 288 default: 289 return -ENOTSUPP; 290 } 291 } 292 293 static int ceph_key_preparse(struct key_preparsed_payload *prep) 294 { 295 struct ceph_crypto_key *ckey; 296 size_t datalen = prep->datalen; 297 int ret; 298 void *p; 299 300 ret = -EINVAL; 301 if (datalen <= 0 || datalen > 32767 || !prep->data) 302 goto err; 303 304 ret = -ENOMEM; 305 ckey = kmalloc(sizeof(*ckey), GFP_KERNEL); 306 if (!ckey) 307 goto err; 308 309 /* TODO ceph_crypto_key_decode should really take const input */ 310 p = (void *)prep->data; 311 ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen); 312 if (ret < 0) 313 goto err_ckey; 314 315 prep->payload.data[0] = ckey; 316 prep->quotalen = datalen; 317 return 0; 318 319 err_ckey: 320 kfree(ckey); 321 err: 322 return ret; 323 } 324 325 static void ceph_key_free_preparse(struct key_preparsed_payload *prep) 326 { 327 struct ceph_crypto_key *ckey = prep->payload.data[0]; 328 ceph_crypto_key_destroy(ckey); 329 kfree(ckey); 330 } 331 332 static void ceph_key_destroy(struct key *key) 333 { 334 struct ceph_crypto_key *ckey = key->payload.data[0]; 335 336 ceph_crypto_key_destroy(ckey); 337 kfree(ckey); 338 } 339 340 struct key_type key_type_ceph = { 341 .name = "ceph", 342 .preparse = ceph_key_preparse, 343 .free_preparse = ceph_key_free_preparse, 344 .instantiate = generic_key_instantiate, 345 .destroy = ceph_key_destroy, 346 }; 347 348 int ceph_crypto_init(void) { 349 return register_key_type(&key_type_ceph); 350 } 351 352 void ceph_crypto_shutdown(void) { 353 unregister_key_type(&key_type_ceph); 354 } 355