/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization. If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <crypto/skcipher.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/pagemap.h>

#include "gss_krb5_internal.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
        return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
        int padding = gss_krb5_padding(blocksize, buf->len - offset);
        char *p;
        struct kvec *iov;

        if (buf->page_len || buf->tail[0].iov_len)
                iov = &buf->tail[0];
        else
                iov = &buf->head[0];
        p = iov->iov_base + iov->iov_len;
        iov->iov_len += padding;
        buf->len += padding;
        memset(p, padding, padding);
}
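
/*
 * Strip the padding that gss_krb5_add_padding() appended. The pad length
 * is read from the last byte of the plaintext, which may sit in the head,
 * the page array, or the tail of the xdr_buf.
 */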
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
        u8 *ptr;
        u8 pad;
        size_t len = buf->len;

        if (len <= buf->head[0].iov_len) {
                pad = *(u8 *)(buf->head[0].iov_base + len - 1);
                if (pad > buf->head[0].iov_len)
                        return -EINVAL;
                buf->head[0].iov_len -= pad;
                goto out;
        } else
                len -= buf->head[0].iov_len;
        if (len <= buf->page_len) {
                unsigned int last = (buf->page_base + len - 1)
                                        >> PAGE_SHIFT;
                unsigned int offset = (buf->page_base + len - 1)
                                        & (PAGE_SIZE - 1);
                ptr = kmap_atomic(buf->pages[last]);
                pad = *(ptr + offset);
                kunmap_atomic(ptr);
                goto out;
        } else
                len -= buf->page_len;
        BUG_ON(len > buf->tail[0].iov_len);
        pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
        /* XXX: NOTE: we do not adjust the page lengths--they represent
         * a range of data in the real filesystem page cache, and we need
         * to know that range so the xdr code can properly place read data.
         * However adjusting the head length, as we do above, is harmless.
         * In the case of a request that fits into a single page, the server
         * also uses length and head length together to determine the original
         * start of the request to copy the request for deferral; so it's
         * easier on the server if we adjust head and tail length in tandem.
         * It's not really a problem that we don't fool with the page and
         * tail lengths, though--at worst badly formed xdr might lead the
         * server to attempt to parse the padding.
         * XXX: Document all these weird requirements for gss mechanism
         * wrap/unwrap functions. */
        if (pad > blocksize)
                return -EINVAL;
        if (buf->len > pad)
                buf->len -= pad;
        else
                return -EINVAL;
        return 0;
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */
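
/*
 * Wrap an RPC message in an RFC 1964 style token: pad the plaintext to
 * the cipher blocksize, prepend the token header, checksum and
 * confounder, then encrypt the confounder plus payload in place.
 */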
u32
gss_krb5_wrap_v1(struct krb5_ctx *kctx, int offset,
                 struct xdr_buf *buf, struct page **pages)
{
        char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
                                      .data = cksumdata};
        int blocksize = 0, plainlen;
        unsigned char *ptr, *msg_start;
        time64_t now;
        int headlen;
        struct page **tmp_pages;
        u32 seq_send;
        u8 *cksumkey;
        u32 conflen = crypto_sync_skcipher_blocksize(kctx->enc);

        dprintk("RPC: %s\n", __func__);

        now = ktime_get_real_seconds();

        blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
        plainlen = conflen + buf->len - offset;

        headlen = g_token_size(&kctx->mech_used,
                GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
                (buf->len - offset);

        ptr = buf->head[0].iov_base + offset;
        /* shift data to make room for header. */
        xdr_extend_head(buf, offset, headlen);

        /* XXX Would be cleverer to encrypt while copying. */
        BUG_ON((buf->len - offset - headlen) % blocksize);

        g_make_token_header(&kctx->mech_used,
                            GSS_KRB5_TOK_HDR_LEN +
                            kctx->gk5e->cksumlength + plainlen, &ptr);


        /* ptr now at header described in rfc 1964, section 1.2.1: */
        ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
        ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

        msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

        /*
         * signalg and sealalg are stored as if they were converted from LE
         * to host endian, even though they're opaque pairs of bytes according
         * to the RFC.
         */
        *(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
        *(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
        ptr[6] = 0xff;
        ptr[7] = 0xff;

        krb5_make_confounder(msg_start, conflen);

        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
        else
                cksumkey = NULL;

        /* XXXJBF: UGH!: */
        tmp_pages = buf->pages;
        buf->pages = pages;
        if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
                          cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;
        buf->pages = tmp_pages;

        memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

        seq_send = atomic_fetch_inc(&kctx->seq_send);

        /* XXX would probably be more efficient to compute checksum
         * and encrypt at the same time: */
        if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
                               seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
                return GSS_S_FAILURE;

        if (gss_encrypt_xdr_buf(kctx->enc, buf,
                                offset + headlen - conflen, pages))
                return GSS_S_FAILURE;

        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
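
/*
 * Verify and strip an RFC 1964 style wrap token: check the token header,
 * decrypt in place, verify the checksum and sequence number, then shift
 * the payload back over the GSS header and remove the padding.
 */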
u32
gss_krb5_unwrap_v1(struct krb5_ctx *kctx, int offset, int len,
                   struct xdr_buf *buf, unsigned int *slack,
                   unsigned int *align)
{
        int signalg;
        int sealalg;
        char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
                                      .data = cksumdata};
        time64_t now;
        int direction;
        s32 seqnum;
        unsigned char *ptr;
        int bodysize;
        void *data_start, *orig_start;
        int data_len;
        int blocksize;
        u32 conflen = crypto_sync_skcipher_blocksize(kctx->enc);
        int crypt_offset;
        u8 *cksumkey;
        unsigned int saved_len = buf->len;

        dprintk("RPC: gss_unwrap_kerberos\n");

        ptr = (u8 *)buf->head[0].iov_base + offset;
        if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
                                  len - offset))
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
            (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
                return GSS_S_DEFECTIVE_TOKEN;

        /* XXX sanity-check bodysize?? */

        /* get the sign and seal algorithms */

        signalg = ptr[2] + (ptr[3] << 8);
        if (signalg != kctx->gk5e->signalg)
                return GSS_S_DEFECTIVE_TOKEN;

        sealalg = ptr[4] + (ptr[5] << 8);
        if (sealalg != kctx->gk5e->sealalg)
                return GSS_S_DEFECTIVE_TOKEN;

        if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
                return GSS_S_DEFECTIVE_TOKEN;

        /*
         * Data starts after token header and checksum.  ptr points
         * to the beginning of the token header
         */
        crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
                                (unsigned char *)buf->head[0].iov_base;

        buf->len = len;
        if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
                return GSS_S_DEFECTIVE_TOKEN;

        if (kctx->gk5e->keyed_cksum)
                cksumkey = kctx->cksum;
        else
                cksumkey = NULL;

        if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
                          cksumkey, KG_USAGE_SEAL, &md5cksum))
                return GSS_S_FAILURE;

        if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
                   kctx->gk5e->cksumlength))
                return GSS_S_BAD_SIG;

        /* it got through unscathed.  Make sure the context is unexpired */

        now = ktime_get_real_seconds();

        if (now > kctx->endtime)
                return GSS_S_CONTEXT_EXPIRED;

        /* do sequencing checks */

        if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
                             ptr + 8, &direction, &seqnum))
                return GSS_S_BAD_SIG;

        if ((kctx->initiate && direction != 0xff) ||
            (!kctx->initiate && direction != 0))
                return GSS_S_BAD_SIG;

        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */

        blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
        data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
                                conflen;
        orig_start = buf->head[0].iov_base + offset;
        data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
        memmove(orig_start, data_start, data_len);
        buf->head[0].iov_len -= (data_start - orig_start);
        buf->len = len - (data_start - orig_start);

        if (gss_krb5_remove_padding(buf, blocksize))
                return GSS_S_DEFECTIVE_TOKEN;

        /* slack must include room for krb5 padding */
        *slack = XDR_QUADLEN(saved_len - buf->len);
        /* The GSS blob always precedes the RPC message payload */
        *align = *slack;
        return GSS_S_COMPLETE;
}

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
        char head[LOCAL_BUF_LEN];
        char tmp[LOCAL_BUF_LEN];
        unsigned int this_len, i;

        BUG_ON(shift > LOCAL_BUF_LEN);

        read_bytes_from_xdr_buf(buf, 0, head, shift);
        for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
                this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
                read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
                write_bytes_to_xdr_buf(buf, i, tmp, this_len);
        }
        write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
        int shifted = 0;
        int this_shift;

        shift %= buf->len;
        while (shifted < shift) {
                this_shift = min(shift - shifted, LOCAL_BUF_LEN);
                rotate_buf_a_little(buf, this_shift);
                shifted += this_shift;
        }
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
        struct xdr_buf subbuf;

        xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
        _rotate_left(&subbuf, shift);
}
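
/*
 * Wrap an RPC message in an RFC 4121 Wrap token: prepend the 16-byte
 * token header (flags, EC and RRC of zero, and the sequence number),
 * then hand the buffer to the enctype's ->encrypt_v2 method.
 */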
u32
gss_krb5_wrap_v2(struct krb5_ctx *kctx, int offset,
                 struct xdr_buf *buf, struct page **pages)
{
        u8 *ptr;
        time64_t now;
        u8 flags = 0x00;
        __be16 *be16ptr;
        __be64 *be64ptr;
        u32 err;

        dprintk("RPC: %s\n", __func__);

        if (kctx->gk5e->encrypt_v2 == NULL)
                return GSS_S_FAILURE;

        /* make room for gss token header */
        if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
                return GSS_S_FAILURE;

        /* construct gss token header */
        ptr = buf->head[0].iov_base + offset;
        *ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
        *ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

        if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
                flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
        if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
                flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
        /* We always do confidentiality in wrap tokens */
        flags |= KG2_TOKEN_FLAG_SEALED;

        *ptr++ = flags;
        *ptr++ = 0xff;
        be16ptr = (__be16 *)ptr;

        *be16ptr++ = 0;
        /* "inner" token header always uses 0 for RRC */
        *be16ptr++ = 0;

        be64ptr = (__be64 *)be16ptr;
        *be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));

        err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
        if (err)
                return err;

        now = ktime_get_real_seconds();
        return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
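
/*
 * Verify and strip an RFC 4121 Wrap token: validate the token header
 * flags, undo any right rotation (RRC), decrypt via the enctype's
 * ->decrypt_v2 method, compare the decrypted copy of the header against
 * the original, then slide the payload back into place and trim the
 * trailer.
 */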
u32
gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len,
                   struct xdr_buf *buf, unsigned int *slack,
                   unsigned int *align)
{
        time64_t now;
        u8 *ptr;
        u8 flags = 0x00;
        u16 ec, rrc;
        int err;
        u32 headskip, tailskip;
        u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
        unsigned int movelen;


        dprintk("RPC: %s\n", __func__);

        if (kctx->gk5e->decrypt_v2 == NULL)
                return GSS_S_FAILURE;

        ptr = buf->head[0].iov_base + offset;

        if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
                return GSS_S_DEFECTIVE_TOKEN;

        flags = ptr[2];
        if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
            (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
                return GSS_S_BAD_SIG;

        if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
                dprintk("%s: token missing expected sealed flag\n", __func__);
                return GSS_S_DEFECTIVE_TOKEN;
        }

        if (ptr[3] != 0xff)
                return GSS_S_DEFECTIVE_TOKEN;

        ec = be16_to_cpup((__be16 *)(ptr + 4));
        rrc = be16_to_cpup((__be16 *)(ptr + 6));

        /*
         * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
         * doesn't want it checked; see page 6 of rfc 2203.
         */

        if (rrc != 0)
                rotate_left(offset + 16, buf, rrc);

        err = (*kctx->gk5e->decrypt_v2)(kctx, offset, len, buf,
                                        &headskip, &tailskip);
        if (err)
                return GSS_S_FAILURE;

        /*
         * Retrieve the decrypted gss token header and verify
         * it against the original
         */
        err = read_bytes_from_xdr_buf(buf,
                                len - GSS_KRB5_TOK_HDR_LEN - tailskip,
                                decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
        if (err) {
                dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
                return GSS_S_FAILURE;
        }
        if (memcmp(ptr, decrypted_hdr, 6)
                                || memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
                dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
                return GSS_S_FAILURE;
        }

        /* do sequencing checks */

        /* it got through unscathed.  Make sure the context is unexpired */
        now = ktime_get_real_seconds();
        if (now > kctx->endtime)
                return GSS_S_CONTEXT_EXPIRED;

        /*
         * Move the head data back to the right position in xdr_buf.
         * We ignore any "ec" data since it might be in the head or
         * the tail, and we really don't need to deal with it.
         * Note that buf->head[0].iov_len may indicate the available
         * head buffer space rather than that actually occupied.
         */
        movelen = min_t(unsigned int, buf->head[0].iov_len, len);
        movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
        BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
               buf->head[0].iov_len);
        memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
        buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
        buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);

        /* Trim off the trailing "extra count" and checksum blob */
        xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);

        *align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
        *slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
        return GSS_S_COMPLETE;
}