xref: /openbmc/linux/net/ceph/crypto.c (revision 8730046c)

#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"

/*
 * Set ->key and ->tfm.  The rest of the key should be filled in before
 * this function is called.
 */
static int set_secret(struct ceph_crypto_key *key, void *buf)
{
	unsigned int noio_flag;
	int ret;

	key->key = NULL;
	key->tfm = NULL;

	switch (key->type) {
	case CEPH_CRYPTO_NONE:
		return 0; /* nothing to do */
	case CEPH_CRYPTO_AES:
		break;
	default:
		return -ENOTSUPP;
	}

	WARN_ON(!key->len);
	key->key = kmemdup(buf, key->len, GFP_NOIO);
	if (!key->key) {
		ret = -ENOMEM;
		goto fail;
	}

	/* crypto_alloc_skcipher() allocates with GFP_KERNEL */
	noio_flag = memalloc_noio_save();
	key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(key->tfm)) {
		ret = PTR_ERR(key->tfm);
		key->tfm = NULL;
		goto fail;
	}

	ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
	if (ret)
		goto fail;

	return 0;

fail:
	ceph_crypto_key_destroy(key);
	return ret;
}

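/*
 * Duplicate a key, giving the copy its own secret buffer and its own
 * crypto transform: a plain memcpy() alone would leave both keys
 * sharing the same key->key allocation and the same tfm.
 */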
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
			  const struct ceph_crypto_key *src)
{
	memcpy(dst, src, sizeof(struct ceph_crypto_key));
	return set_secret(dst, src->key);
}

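/*
 * Wire format produced/consumed by the encode/decode helpers below
 * (integers little-endian, as per ceph_encode_16()):
 *
 *	u16	type		CEPH_CRYPTO_NONE or CEPH_CRYPTO_AES
 *	...	created		raw copy, sizeof(key->created) bytes
 *	u16	len		length of the secret in bytes
 *	u8	key[len]	the secret itself
 */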
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
	if (*p + sizeof(u16) + sizeof(key->created) +
	    sizeof(u16) + key->len > end)
		return -ERANGE;
	ceph_encode_16(p, key->type);
	ceph_encode_copy(p, &key->created, sizeof(key->created));
	ceph_encode_16(p, key->len);
	ceph_encode_copy(p, key->key, key->len);
	return 0;
}

int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	int ret;

	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	ceph_decode_need(p, end, key->len, bad);
	ret = set_secret(key, *p);
	*p += key->len;
	return ret;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}

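/*
 * Decode the base64-style armored form of the above encoding.
 * inlen * 3 / 4 is an upper bound on the decoded size (four armor
 * characters decode to at most three bytes), so the kmalloc() below is
 * always large enough; ceph_unarmor() returns the exact decoded length.
 */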
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
	int inlen = strlen(inkey);
	int blen = inlen * 3 / 4;
	void *buf, *p;
	int ret;

	dout("crypto_key_unarmor %s\n", inkey);
	buf = kmalloc(blen, GFP_NOFS);
	if (!buf)
		return -ENOMEM;
	blen = ceph_unarmor(buf, inkey, inkey+inlen);
	if (blen < 0) {
		kfree(buf);
		return blen;
	}

	p = buf;
	ret = ceph_crypto_key_decode(key, &p, p + blen);
	kfree(buf);
	if (ret)
		return ret;
	dout("crypto_key_unarmor key %p type %d len %d\n", key,
	     key->type, key->len);
	return 0;
}

void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
	if (key) {
		kfree(key->key);
		key->key = NULL;
		crypto_free_skcipher(key->tfm);
		key->tfm = NULL;
	}
}

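/*
 * A fixed IV (CEPH_AES_IV) is used for every AES operation.
 * ceph_aes_crypt() copies it into a stack buffer per request, since
 * the skcipher layer may update the IV buffer in place.
 */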
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;

/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	if (is_vmalloc) {
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		off = 0;
		buf += len;
		buf_len -= len;
	}
	WARN_ON(buf_len != 0);

	return 0;
}

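/*
 * Example of what setup_sgtable() produces, assuming 4K pages: a
 * vmalloc'ed buffer starting 100 bytes into a page with buf_len = 9000
 * gives chunk_cnt = 3 and three entries of 3996, 4096 and 908 bytes --
 * one per page, since vmalloc memory is only virtually contiguous.
 */
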
static void teardown_sgtable(struct sg_table *sgt)
{
	if (sgt->orig_nents > 1)
		sg_free_table(sgt);
}

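/*
 * PKCS#7-style padding: pad_byte is always in [1, AES_BLOCK_SIZE] and
 * every pad byte holds the pad length.  For example, in_len = 13
 * encrypts to crypt_len = 16 with three 0x03 bytes appended, and
 * in_len = 16 encrypts to crypt_len = 32 with a full block of 0x10
 * appended.  Encrypting callers must therefore size @buf for at least
 * in_len + AES_BLOCK_SIZE bytes.
 */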
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
			  void *buf, int buf_len, int in_len, int *pout_len)
{
	SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
	struct sg_table sgt;
	struct scatterlist prealloc_sg;
	char iv[AES_BLOCK_SIZE];
	int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
	int crypt_len = encrypt ? in_len + pad_byte : in_len;
	int ret;

	WARN_ON(crypt_len > buf_len);
	if (encrypt)
		memset(buf + in_len, pad_byte, pad_byte);
	ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
	if (ret)
		return ret;

	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(req, key->tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

	/*
	print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
		       key->key, key->len, 1);
	print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/
	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
	if (ret) {
		pr_err("%s %scrypt failed: %d\n", __func__,
		       encrypt ? "en" : "de", ret);
		goto out_sgt;
	}
	/*
	print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/

	if (encrypt) {
		*pout_len = crypt_len;
	} else {
		pad_byte = *(char *)(buf + in_len - 1);
		if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
		    in_len >= pad_byte) {
			*pout_len = in_len - pad_byte;
		} else {
			pr_err("%s got bad padding %d on in_len %d\n",
			       __func__, pad_byte, in_len);
			ret = -EPERM;
			goto out_sgt;
		}
	}

out_sgt:
	teardown_sgtable(&sgt);
	return ret;
}

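/*
 * Encrypt or decrypt @buf in place.  A minimal sketch of an encrypting
 * caller (buffer and length names are hypothetical, not part of this
 * file):
 *
 *	void *buf = kmalloc(plain_len + AES_BLOCK_SIZE, GFP_NOFS);
 *	int out_len, ret;
 *
 *	memcpy(buf, plaintext, plain_len);
 *	ret = ceph_crypt(key, true, buf, plain_len + AES_BLOCK_SIZE,
 *			 plain_len, &out_len);
 *
 * On success buf holds out_len bytes of ciphertext; decryption works
 * the same way with encrypt == false and needs no extra slack space.
 */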
int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
	       void *buf, int buf_len, int in_len, int *pout_len)
{
	switch (key->type) {
	case CEPH_CRYPTO_NONE:
		*pout_len = in_len;
		return 0;
	case CEPH_CRYPTO_AES:
		return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
				      pout_len);
	default:
		return -ENOTSUPP;
	}
}

static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey;
	size_t datalen = prep->datalen;
	int ret;
	void *p;

	ret = -EINVAL;
	if (datalen <= 0 || datalen > 32767 || !prep->data)
		goto err;

	ret = -ENOMEM;
	ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
	if (!ckey)
		goto err;

	/* TODO ceph_crypto_key_decode should really take const input */
	p = (void *)prep->data;
	ret = ceph_crypto_key_decode(ckey, &p, (char *)prep->data + datalen);
	if (ret < 0)
		goto err_ckey;

	prep->payload.data[0] = ckey;
	prep->quotalen = datalen;
	return 0;

err_ckey:
	kfree(ckey);
err:
	return ret;
}

static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
	struct ceph_crypto_key *ckey = prep->payload.data[0];

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

static void ceph_key_destroy(struct key *key)
{
	struct ceph_crypto_key *ckey = key->payload.data[0];

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

struct key_type key_type_ceph = {
	.name		= "ceph",
	.preparse	= ceph_key_preparse,
	.free_preparse	= ceph_key_free_preparse,
	.instantiate	= generic_key_instantiate,
	.destroy	= ceph_key_destroy,
};

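/*
 * Keys of type "ceph" carry the binary ceph_crypto_key encoding as
 * their payload.  They are typically added from userspace with
 * add_key(2)/keyctl and then referenced by name, e.g. via the libceph
 * "key=" option; the request_key() lookup itself lives outside this
 * file.
 */
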
int ceph_crypto_init(void)
{
	return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void)
{
	unregister_key_type(&key_type_ceph);
}