/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/crypto.h>
#include <linux/compiler.h>
#include <linux/vmalloc.h>
#include <crypto/algapi.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include "internal.h"

static const struct crypto_type crypto_scomp_type;
static void * __percpu *scomp_src_scratches;
static void * __percpu *scomp_dst_scratches;
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

#ifdef CONFIG_NET
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rscomp;

	strncpy(rscomp.type, "scomp", sizeof(rscomp.type));

	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rscomp))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void * __percpu *scratches)
{
	int i;

	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}

static void * __percpu *crypto_scomp_alloc_scratches(void)
{
	void * __percpu *scratches;
	int i;

	scratches = alloc_percpu(void *);
	if (!scratches)
		return NULL;

	for_each_possible_cpu(i) {
		void *scratch;

		scratch = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
		if (!scratch)
			goto error;
		*per_cpu_ptr(scratches, i) = scratch;
	}

	return scratches;

error:
	crypto_scomp_free_scratches(scratches);
	return NULL;
}

static void crypto_scomp_free_all_scratches(void)
{
	if (!--scomp_scratch_users) {
		crypto_scomp_free_scratches(scomp_src_scratches);
		crypto_scomp_free_scratches(scomp_dst_scratches);
		scomp_src_scratches = NULL;
		scomp_dst_scratches = NULL;
	}
}

static int crypto_scomp_alloc_all_scratches(void)
{
	if (!scomp_scratch_users++) {
		scomp_src_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_src_scratches)
			return -ENOMEM;
		scomp_dst_scratches = crypto_scomp_alloc_scratches();
		if (!scomp_dst_scratches) {
			crypto_scomp_free_scratches(scomp_src_scratches);
			scomp_src_scratches = NULL;
			return -ENOMEM;
		}
	}
	return 0;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
	int ret;

	mutex_lock(&scomp_lock);
	ret = crypto_scomp_alloc_all_scratches();
	mutex_unlock(&scomp_lock);

	return ret;
}

static void crypto_scomp_sg_free(struct scatterlist *sgl)
{
	int i, n;
	struct page *page;

	if (!sgl)
		return;

	n = sg_nents(sgl);
	for_each_sg(sgl, sgl, n, i) {
		page = sg_page(sgl);
		if (page)
			__free_page(page);
	}

	kfree(sgl);
}

static struct scatterlist *crypto_scomp_sg_alloc(size_t size, gfp_t gfp)
{
	struct scatterlist *sgl;
	struct page *page;
	int i, n;

	n = ((size - 1) >> PAGE_SHIFT) + 1;

	sgl = kmalloc_array(n, sizeof(struct scatterlist), gfp);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, n);

	for (i = 0; i < n; i++) {
		page = alloc_page(gfp);
		if (!page)
			goto err;
		sg_set_page(sgl + i, page, PAGE_SIZE, 0);
	}

	return sgl;

err:
	sg_mark_end(sgl + i);
	crypto_scomp_sg_free(sgl);
	return NULL;
}

/*
 * Bounce the scatterlist data through the per-CPU scratch buffers so the
 * synchronous scomp backend can operate on linear memory.
 */
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
	void **tfm_ctx = acomp_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void **ctx = acomp_request_ctx(req);
	const int cpu = get_cpu();
	u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
	u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
	int ret;

	if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE) {
		ret = -EINVAL;
		goto out;
	}

	if (req->dst && !req->dlen) {
		ret = -EINVAL;
		goto out;
	}

	if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
		req->dlen = SCOMP_SCRATCH_SIZE;

	scatterwalk_map_and_copy(scratch_src, req->src, 0, req->slen, 0);
	if (dir)
		ret = crypto_scomp_compress(scomp, scratch_src, req->slen,
					    scratch_dst, &req->dlen, *ctx);
	else
		ret = crypto_scomp_decompress(scomp, scratch_src, req->slen,
					      scratch_dst, &req->dlen, *ctx);
	if (!ret) {
		if (!req->dst) {
			req->dst = crypto_scomp_sg_alloc(req->dlen, GFP_ATOMIC);
			if (!req->dst) {
				/* report the failure instead of returning 0 */
				ret = -ENOMEM;
				goto out;
			}
		}
		scatterwalk_map_and_copy(scratch_dst, req->dst, 0, req->dlen,
					 1);
	}
out:
	put_cpu();
	return ret;
}

static int scomp_acomp_compress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
	return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

	crypto_free_scomp(*ctx);

	mutex_lock(&scomp_lock);
	crypto_scomp_free_all_scratches();
	mutex_unlock(&scomp_lock);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
	struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	scomp = crypto_create_tfm(calg, &crypto_scomp_type);
	if (IS_ERR(scomp)) {
		crypto_mod_put(calg);
		return PTR_ERR(scomp);
	}

	*ctx = scomp;
	tfm->exit = crypto_exit_scomp_ops_async;

	crt->compress = scomp_acomp_compress;
	crt->decompress = scomp_acomp_decompress;
	crt->dst_free = crypto_scomp_sg_free;
	crt->reqsize = sizeof(void *);

	return 0;
}

struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx;

	ctx = crypto_scomp_alloc_ctx(scomp);
	if (IS_ERR(ctx)) {
		kfree(req);
		return NULL;
	}

	*req->__ctx = ctx;

	return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
	struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
	struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_scomp *scomp = *tfm_ctx;
	void *ctx = *req->__ctx;

	if (ctx)
		crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_scomp_show,
#endif
	.report = crypto_scomp_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SCOMPRESS,
	.tfmsize = offsetof(struct crypto_scomp, base),
};

int crypto_register_scomp(struct scomp_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	base->cra_type = &crypto_scomp_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

int crypto_unregister_scomp(struct scomp_alg *alg)
{
	return crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_scomp(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");