xref: /openbmc/linux/net/ceph/crypto.c (revision 174cd4b1)
1 
#include <linux/ceph/ceph_debug.h>

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>

#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"
16 
17 /*
18  * Set ->key and ->tfm.  The rest of the key should be filled in before
19  * this function is called.
20  */
21 static int set_secret(struct ceph_crypto_key *key, void *buf)
22 {
23 	unsigned int noio_flag;
24 	int ret;
25 
26 	key->key = NULL;
27 	key->tfm = NULL;
28 
29 	switch (key->type) {
30 	case CEPH_CRYPTO_NONE:
31 		return 0; /* nothing to do */
32 	case CEPH_CRYPTO_AES:
33 		break;
34 	default:
35 		return -ENOTSUPP;
36 	}
37 
38 	WARN_ON(!key->len);
39 	key->key = kmemdup(buf, key->len, GFP_NOIO);
40 	if (!key->key) {
41 		ret = -ENOMEM;
42 		goto fail;
43 	}
44 
45 	/* crypto_alloc_skcipher() allocates with GFP_KERNEL */
46 	noio_flag = memalloc_noio_save();
47 	key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
48 	memalloc_noio_restore(noio_flag);
49 	if (IS_ERR(key->tfm)) {
50 		ret = PTR_ERR(key->tfm);
51 		key->tfm = NULL;
52 		goto fail;
53 	}
54 
55 	ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
56 	if (ret)
57 		goto fail;
58 
59 	return 0;
60 
61 fail:
62 	ceph_crypto_key_destroy(key);
63 	return ret;
64 }
65 
66 int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
67 			  const struct ceph_crypto_key *src)
68 {
69 	memcpy(dst, src, sizeof(struct ceph_crypto_key));
70 	return set_secret(dst, src->key);
71 }
72 
73 int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
74 {
75 	if (*p + sizeof(u16) + sizeof(key->created) +
76 	    sizeof(u16) + key->len > end)
77 		return -ERANGE;
78 	ceph_encode_16(p, key->type);
79 	ceph_encode_copy(p, &key->created, sizeof(key->created));
80 	ceph_encode_16(p, key->len);
81 	ceph_encode_copy(p, key->key, key->len);
82 	return 0;
83 }
84 
/*
 * Parse a wire-format crypto key from [*p, end) into @key and set up
 * its secret/tfm via set_secret().  *p is advanced past the key.
 * Returns -EINVAL on a short/garbled buffer, or set_secret()'s error.
 */
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
	int ret;

	/* fixed-size header: type (u16), creation time, secret len (u16) */
	ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
	key->type = ceph_decode_16(p);
	ceph_decode_copy(p, &key->created, sizeof(key->created));
	key->len = ceph_decode_16(p);
	/* bounds-check the variable-length secret before touching it */
	ceph_decode_need(p, end, key->len, bad);
	ret = set_secret(key, *p);
	/* advance past the secret even if set_secret() failed */
	*p += key->len;
	return ret;

bad:
	dout("failed to decode crypto key\n");
	return -EINVAL;
}
102 
103 int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
104 {
105 	int inlen = strlen(inkey);
106 	int blen = inlen * 3 / 4;
107 	void *buf, *p;
108 	int ret;
109 
110 	dout("crypto_key_unarmor %s\n", inkey);
111 	buf = kmalloc(blen, GFP_NOFS);
112 	if (!buf)
113 		return -ENOMEM;
114 	blen = ceph_unarmor(buf, inkey, inkey+inlen);
115 	if (blen < 0) {
116 		kfree(buf);
117 		return blen;
118 	}
119 
120 	p = buf;
121 	ret = ceph_crypto_key_decode(key, &p, p + blen);
122 	kfree(buf);
123 	if (ret)
124 		return ret;
125 	dout("crypto_key_unarmor key %p type %d len %d\n", key,
126 	     key->type, key->len);
127 	return 0;
128 }
129 
130 void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
131 {
132 	if (key) {
133 		kfree(key->key);
134 		key->key = NULL;
135 		crypto_free_skcipher(key->tfm);
136 		key->tfm = NULL;
137 	}
138 }
139 
140 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
141 
/*
 * Should be used for buffers allocated with ceph_kvmalloc().
 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
 * in-buffer (msg front).
 *
 * Dispose of @sgt with teardown_sgtable().
 *
 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
 * in cases where a single sg is sufficient.  No attempt to reduce the
 * number of sgs by squeezing physically contiguous pages together is
 * made though, for simplicity.
 */
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
			 const void *buf, unsigned int buf_len)
{
	struct scatterlist *sg;
	const bool is_vmalloc = is_vmalloc_addr(buf);
	unsigned int off = offset_in_page(buf);
	unsigned int chunk_cnt = 1;
	unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
	int i;
	int ret;

	if (buf_len == 0) {
		/* zero the table so teardown_sgtable() is still safe */
		memset(sgt, 0, sizeof(*sgt));
		return -EINVAL;
	}

	if (is_vmalloc) {
		/*
		 * vmalloc pages may be physically discontiguous, so each
		 * page gets its own sg entry.
		 */
		chunk_cnt = chunk_len >> PAGE_SHIFT;
		chunk_len = PAGE_SIZE;
	}

	if (chunk_cnt > 1) {
		ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
		if (ret)
			return ret;
	} else {
		/* single chunk: use caller's sg, no allocation needed */
		WARN_ON(chunk_cnt != 1);
		sg_init_table(prealloc_sg, 1);
		sgt->sgl = prealloc_sg;
		sgt->nents = sgt->orig_nents = 1;
	}

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		struct page *page;
		/* first chunk may start mid-page; last may end early */
		unsigned int len = min(chunk_len - off, buf_len);

		if (is_vmalloc)
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		sg_set_page(sg, page, len, off);

		off = 0;	/* only the first chunk has an offset */
		buf += len;
		buf_len -= len;
	}
	/* the sg entries must account for the entire buffer */
	WARN_ON(buf_len != 0);

	return 0;
}
205 
206 static void teardown_sgtable(struct sg_table *sgt)
207 {
208 	if (sgt->orig_nents > 1)
209 		sg_free_table(sgt);
210 }
211 
/*
 * In-place AES-CBC en/decryption of @buf with @key's tfm.
 *
 * @in_len is the payload length; @buf_len is the usable capacity of
 * @buf.  On encrypt the payload is padded to a whole AES block
 * (PKCS#7-style: pad_byte copies of pad_byte are appended), so the
 * caller must provide up to AES_BLOCK_SIZE bytes of slack.  On success
 * *pout_len is set to the ciphertext length (encrypt) or the unpadded
 * plaintext length (decrypt).
 */
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
			  void *buf, int buf_len, int in_len, int *pout_len)
{
	SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
	struct sg_table sgt;
	struct scatterlist prealloc_sg;
	char iv[AES_BLOCK_SIZE] __aligned(8);
	/* pad is always 1..AES_BLOCK_SIZE bytes, even on aligned input */
	int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
	int crypt_len = encrypt ? in_len + pad_byte : in_len;
	int ret;

	WARN_ON(crypt_len > buf_len);
	if (encrypt)
		memset(buf + in_len, pad_byte, pad_byte);
	ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
	if (ret)
		return ret;

	/* fixed IV; copied because the cipher may modify it in place */
	memcpy(iv, aes_iv, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(req, key->tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);

	/*
	print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
		       key->key, key->len, 1);
	print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/
	if (encrypt)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);
	skcipher_request_zero(req);
	if (ret) {
		pr_err("%s %scrypt failed: %d\n", __func__,
		       encrypt ? "en" : "de", ret);
		goto out_sgt;
	}
	/*
	print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
		       buf, crypt_len, 1);
	*/

	if (encrypt) {
		*pout_len = crypt_len;
	} else {
		/* last byte of the decrypted block encodes the pad length */
		pad_byte = *(char *)(buf + in_len - 1);
		if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
		    in_len >= pad_byte) {
			*pout_len = in_len - pad_byte;
		} else {
			pr_err("%s got bad padding %d on in_len %d\n",
			       __func__, pad_byte, in_len);
			ret = -EPERM;
			goto out_sgt;
		}
	}

out_sgt:
	teardown_sgtable(&sgt);
	return ret;
}
275 
276 int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
277 	       void *buf, int buf_len, int in_len, int *pout_len)
278 {
279 	switch (key->type) {
280 	case CEPH_CRYPTO_NONE:
281 		*pout_len = in_len;
282 		return 0;
283 	case CEPH_CRYPTO_AES:
284 		return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
285 				      pout_len);
286 	default:
287 		return -ENOTSUPP;
288 	}
289 }
290 
291 static int ceph_key_preparse(struct key_preparsed_payload *prep)
292 {
293 	struct ceph_crypto_key *ckey;
294 	size_t datalen = prep->datalen;
295 	int ret;
296 	void *p;
297 
298 	ret = -EINVAL;
299 	if (datalen <= 0 || datalen > 32767 || !prep->data)
300 		goto err;
301 
302 	ret = -ENOMEM;
303 	ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
304 	if (!ckey)
305 		goto err;
306 
307 	/* TODO ceph_crypto_key_decode should really take const input */
308 	p = (void *)prep->data;
309 	ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
310 	if (ret < 0)
311 		goto err_ckey;
312 
313 	prep->payload.data[0] = ckey;
314 	prep->quotalen = datalen;
315 	return 0;
316 
317 err_ckey:
318 	kfree(ckey);
319 err:
320 	return ret;
321 }
322 
323 static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
324 {
325 	struct ceph_crypto_key *ckey = prep->payload.data[0];
326 	ceph_crypto_key_destroy(ckey);
327 	kfree(ckey);
328 }
329 
330 static void ceph_key_destroy(struct key *key)
331 {
332 	struct ceph_crypto_key *ckey = key->payload.data[0];
333 
334 	ceph_crypto_key_destroy(ckey);
335 	kfree(ckey);
336 }
337 
/* "ceph" key type for the kernel keyring (mount secrets, auth keys) */
struct key_type key_type_ceph = {
	.name		= "ceph",
	.preparse	= ceph_key_preparse,
	.free_preparse	= ceph_key_free_preparse,
	.instantiate	= generic_key_instantiate,
	.destroy	= ceph_key_destroy,
};
345 
346 int ceph_crypto_init(void) {
347 	return register_key_type(&key_type_ceph);
348 }
349 
350 void ceph_crypto_shutdown(void) {
351 	unregister_key_type(&key_type_ceph);
352 }
353