/*
 * linux/net/sunrpc/gss_krb5_crypto.c
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Andy Adamson <andros@umich.edu>
 * Bruce Fields <bfields@umich.edu>
 */

/*
 * Copyright (C) 1998 by the FundsXpress, INC.
 *
 * All rights reserved.
 *
 * Export of this software from the United States of America may require
 * a specific license from the United States Government.  It is the
 * responsibility of any person or organization contemplating export to
 * obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of FundsXpress. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  FundsXpress makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/err.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

/*
 * Encrypt "length" bytes (a multiple of the cipher block size) from
 * "in" to "out".  A NULL "iv" selects an all-zero IV.
 */
u32
krb5_encrypt(
	struct crypto_blkcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[16] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > 16) {
		dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: krb5_encrypt returns %d\n", ret);
	return ret;
}

EXPORT_SYMBOL(krb5_encrypt);

/*
 * Decrypt "length" bytes (a multiple of the cipher block size) from
 * "in" to "out".  A NULL "iv" selects an all-zero IV.
 */
u32
krb5_decrypt(
	struct crypto_blkcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[16] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > 16) {
		dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC: gss_k5decrypt returns %d\n", ret);
	return ret;
}

EXPORT_SYMBOL(krb5_decrypt);
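
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file): a caller that already holds a block cipher tfm can encrypt a
 * block-aligned buffer in place, e.g.
 *
 *	u32 err;
 *
 *	err = krb5_encrypt(kctx->enc, NULL, buf, buf, sizeof(buf));
 *	if (err)
 *		return GSS_S_FAILURE;
 *
 * Here "kctx->enc" and "buf" are hypothetical caller state; the length
 * must be a multiple of the cipher block size, and passing a NULL iv
 * selects the all-zero IV.  krb5_decrypt() is used the same way.
 */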

static int
checksummer(struct scatterlist *sg, void *data)
{
	struct hash_desc *desc = data;

	return crypto_hash_update(desc, sg, sg->length);
}

/* checksum the plaintext data and hdrlen bytes of the token header */
s32
make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
	      int body_offset, struct xdr_netobj *cksum)
{
	struct hash_desc desc; /* XXX add to ctx? */
	struct scatterlist sg[1];
	int err;

	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	cksum->len = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_init_one(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, cksum->data);

out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}

EXPORT_SYMBOL(make_checksum);
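
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file): checksumming a token header plus the message body could look
 * like
 *
 *	struct xdr_netobj md5cksum = { .len = 0, .data = cksumdata };
 *
 *	if (make_checksum("md5", krb5_hdr, 8, text, 0, &md5cksum))
 *		return GSS_S_FAILURE;
 *
 * Here "krb5_hdr", "text" and "cksumdata" are hypothetical caller
 * state; cksum->data must be large enough for the digest of the named
 * hash, and cksum->len is set to that digest size on success.
 */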

struct encryptor_desc {
	u8 iv[8]; /* XXX hard-coded blocksize */
	struct blkcipher_desc desc;
	int pos;
	struct xdr_buf *outbuf;
	struct page **pages;
	struct scatterlist infrags[4];
	struct scatterlist outfrags[4];
	int fragno;
	int fraglen;
};

/*
 * xdr_process_buf() callback: gather scatterlist fragments and encrypt
 * each block-aligned run in place, carrying any sub-block remainder
 * over into the next call.
 */
static int
encryptor(struct scatterlist *sg, void *data)
{
	struct encryptor_desc *desc = data;
	struct xdr_buf *outbuf = desc->outbuf;
	struct page *in_page;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;
	int page_pos;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug.
	 */
	BUG_ON(desc->fragno > 3);

	page_pos = desc->pos - outbuf->head[0].iov_len;
	if (page_pos >= 0 && page_pos < outbuf->page_len) {
		/* pages are not in place: */
		int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
		in_page = desc->pages[i];
	} else {
		in_page = sg_page(sg);
	}
	sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length,
		    sg->offset);
	sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;
	desc->pos += sg->length;

	fraglen = thislen & 7; /* XXX hardcoded blocksize */
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->infrags[desc->fragno - 1]);
	sg_mark_end(&desc->outfrags[desc->fragno - 1]);

	ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
					  desc->infrags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->infrags, 4);
	sg_init_table(desc->outfrags, 4);

	if (fraglen) {
		sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->infrags[0] = desc->outfrags[0];
		sg_assign_page(&desc->infrags[0], in_page);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

/* Encrypt, in place, the part of "buf" that starts at "offset". */
int
gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset, struct page **pages)
{
	int ret;
	struct encryptor_desc desc;

	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.pos = offset;
	desc.outbuf = buf;
	desc.pages = pages;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.infrags, 4);
	sg_init_table(desc.outfrags, 4);

	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
	return ret;
}

EXPORT_SYMBOL(gss_encrypt_xdr_buf);
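
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file): encrypting everything after an already-built token header in
 * place could look like
 *
 *	err = gss_encrypt_xdr_buf(kctx->enc, buf, offset, pages);
 *	if (err)
 *		return GSS_S_FAILURE;
 *
 * Here "kctx->enc", "buf", "offset" and "pages" are hypothetical caller
 * state; (buf->len - offset) must be a multiple of the cipher block
 * size, and "pages" names the source pages for the page portion of the
 * buffer when they have not yet been placed into "buf".
 */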

struct decryptor_desc {
	u8 iv[8]; /* XXX hard-coded blocksize */
	struct blkcipher_desc desc;
	struct scatterlist frags[4];
	int fragno;
	int fraglen;
};

/*
 * xdr_process_buf() callback: gather scatterlist fragments and decrypt
 * each block-aligned run in place, carrying any sub-block remainder
 * over into the next call.
 */
static int
decryptor(struct scatterlist *sg, void *data)
{
	struct decryptor_desc *desc = data;
	int thislen = desc->fraglen + sg->length;
	int fraglen, ret;

	/* Worst case is 4 fragments: head, end of page 1, start
	 * of page 2, tail.  Anything more is a bug.
	 */
	BUG_ON(desc->fragno > 3);
	sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,
		    sg->offset);
	desc->fragno++;
	desc->fraglen += sg->length;

	fraglen = thislen & 7; /* XXX hardcoded blocksize */
	thislen -= fraglen;

	if (thislen == 0)
		return 0;

	sg_mark_end(&desc->frags[desc->fragno - 1]);

	ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
					  desc->frags, thislen);
	if (ret)
		return ret;

	sg_init_table(desc->frags, 4);

	if (fraglen) {
		sg_set_page(&desc->frags[0], sg_page(sg), fraglen,
			    sg->offset + sg->length - fraglen);
		desc->fragno = 1;
		desc->fraglen = fraglen;
	} else {
		desc->fragno = 0;
		desc->fraglen = 0;
	}
	return 0;
}

/* Decrypt, in place, the part of "buf" that starts at "offset". */
int
gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
		    int offset)
{
	struct decryptor_desc desc;

	/* XXXJBF: */
	BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);

	memset(desc.iv, 0, sizeof(desc.iv));
	desc.desc.tfm = tfm;
	desc.desc.info = desc.iv;
	desc.desc.flags = 0;
	desc.fragno = 0;
	desc.fraglen = 0;

	sg_init_table(desc.frags, 4);

	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}

EXPORT_SYMBOL(gss_decrypt_xdr_buf);
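
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * file): decrypting a received token body in place could look like
 *
 *	if (gss_decrypt_xdr_buf(kctx->enc, buf, offset))
 *		return GSS_S_DEFECTIVE_TOKEN;
 *
 * Here "kctx->enc", "buf" and "offset" are hypothetical caller state;
 * as with encryption, (buf->len - offset) must be a multiple of the
 * cipher block size.
 */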