/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}
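/*
 * Illustrative sketch (hypothetical helper, not used above): the v1
 * padding scheme writes "padding" bytes that each hold the value
 * "padding", so the last plaintext byte always encodes how much to
 * strip.  Because gss_krb5_padding() never returns 0, an already
 * aligned buffer gains a full block of padding.
 */
static inline int gss_krb5_padding_example(void)
{
	char data[24] = { 0 };			/* 12 payload bytes */
	int pad = gss_krb5_padding(8, 12);	/* 8 - (12 % 8) == 4 */

	memset(data + 12, pad, pad);	/* data[12..15] = 0x04; data[15]
					 * tells the receiver to drop 4 */
	return pad;			/* an aligned length of 16 would
					 * instead yield a full block, 8 */
}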
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}
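/*
 * Illustrative sketch (hypothetical helper, not used above): the pad
 * byte is the last byte of the logical buffer, which may fall in the
 * head kvec, the page array, or the tail kvec.  For example, with a
 * 100-byte head, page_len 3000 and a 20-byte tail (buf->len == 3120),
 * len shrinks to 3020 after the head test and to 20 after the page
 * test, so the pad byte is tail byte 19.
 */
static inline int xdr_buf_pad_segment_example(const struct xdr_buf *buf)
{
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len)
		return 0;		/* pad byte lives in the head */
	len -= buf->head[0].iov_len;
	if (len <= buf->page_len)
		return 1;		/* pad byte lives in the pages */
	return 2;			/* pad byte lives in the tail */
}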
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = random32();
		i = (i << 32) | random32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */
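/*
 * Sketch of the v1 wrap token built and parsed below (RFC 1964,
 * sections 1.2.1-1.2.2; offsets are relative to "ptr", which points
 * just past the mech-OID framing added by g_make_token_header()):
 *
 *	ptr[0..1]	TOK_ID		0x02 0x01 (KG_TOK_WRAP_MSG)
 *	ptr[2..3]	SGN_ALG		signing algorithm, little-endian
 *	ptr[4..5]	SEAL_ALG	sealing algorithm, little-endian
 *	ptr[6..7]	Filler		0xff 0xff
 *	ptr[8..15]	SND_SEQ		encrypted sequence number
 *	ptr[16..]	SGN_CKSUM	checksum (8 bytes for DES, 20 for
 *					DES3 here), then the encrypted
 *					confounder, data and padding
 */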
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;

	dprintk("RPC:       %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);


	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	*(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	memset(ptr + 4, 0xff, 4);
	*(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);

	gss_krb5_make_confounder(msg_start, blocksize);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - blocksize,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
									pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
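/*
 * Rough before/after view of what v1 wrap does to the xdr_buf
 * (illustrative only; "E(...)" marks the region encrypted in place):
 *
 *	before:	[ head: rpc hdr | data ][ pages ][ tail ]
 *			        ^offset
 *	after:	[ head: rpc hdr | token framing + krb5 hdr + cksum |
 *		  E(confounder | data ... | pad) ]
 *
 * xdr_extend_head() opens up "headlen" bytes at offset for the token,
 * and everything from the confounder onward is encrypted in place.
 */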
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	int			crypt_offset;
	u8			*cksumkey;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;
	if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
				ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;
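	/*
	 * The direction octet recovered from SND_SEQ is 0x00 for tokens
	 * sent by the context initiator and 0xff for tokens sent by the
	 * acceptor (see the kctx->initiate ? 0 : 0xff in the wrap path
	 * above), so we require the opposite of our own role here.
	 */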
	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

/*
 * We cannot currently handle tokens with rotated data.  We need a
 * generalized routine to rotate the data in place.  It is anticipated
 * that we won't encounter rotated data in the general case.
 */
static u32
rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
{
	unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);

	if (realrrc == 0)
		return 0;

	dprintk("%s: cannot process token with rotated data: "
		"rrc %u, realrrc %u\n", __func__, rrc, realrrc);
	return 1;
}
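/*
 * Hypothetical sketch (not used above) of the generalized rotation
 * rotate_left() punts on, shown for a contiguous buffer only: RFC 4121
 * lets the sender right-rotate the token body by RRC bytes, so the
 * receiver undoes it with a left rotation by rrc modulo the region
 * length.  The classic three-reversal trick does this in place; a real
 * fix would have to walk the head/pages/tail segments of the xdr_buf.
 */
static inline void reverse_bytes_sketch(u8 *p, size_t n)
{
	size_t i;

	for (i = 0; i < n / 2; i++) {
		u8 tmp = p[i];

		p[i] = p[n - 1 - i];
		p[n - 1 - i] = tmp;
	}
}

static inline void rotate_left_contig_sketch(u8 *p, size_t len, size_t shift)
{
	shift %= len;
	reverse_bytes_sketch(p, shift);		/* reverse first shift bytes */
	reverse_bytes_sketch(p + shift, len - shift);	/* then the rest */
	reverse_bytes_sketch(p, len);		/* reversing the whole buffer
						 * completes the left rotate */
}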
static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	int		blocksize;
	u8		*ptr, *plainhdr;
	s32		now;
	u8		flags = 0x00;
	__be16		*be16ptr, ec = 0;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
	*be16ptr++ = cpu_to_be16(ec);
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = cpu_to_be16(0);

	be64ptr = (__be64 *)be16ptr;
	spin_lock(&krb5_seq_lock);
	*be64ptr = cpu_to_be64(kctx->seq_send64++);
	spin_unlock(&krb5_seq_lock);

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
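/*
 * Sketch of the v2 wrap token header built above and checked below
 * (RFC 4121, section 4.2.6.2; multi-byte fields are big-endian):
 *
 *	ptr[0..1]	TOK_ID	0x05 0x04 (KG2_TOK_WRAP)
 *	ptr[2]		Flags	SentByAcceptor / Sealed / AcceptorSubkey
 *	ptr[3]		Filler	0xff
 *	ptr[4..5]	EC	"extra count" of added plaintext pad bytes
 *	ptr[6..7]	RRC	right rotation count (always 0 on send)
 *	ptr[8..15]	SND_SEQ	64-bit sequence number
 */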
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32		now;
	u64		seqnum;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;


	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	seqnum = be64_to_cpup((__be64 *)(ptr + 8));

	if (rrc != 0) {
		err = rotate_left(kctx, offset, buf, rrc);
		if (err)
			return GSS_S_FAILURE;
	}

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
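	/*
	 * Worked example with illustrative numbers: for offset 0, a
	 * 16-byte token header, headskip 16 (e.g. an AES confounder)
	 * and buf->len 200 with 4096 bytes of head space, movelen is
	 * min(4096, 200) - (0 + 16 + 16) = 168: the payload bytes
	 * sitting in the head past the headers, slid back to "ptr".
	 */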
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	return GSS_S_COMPLETE;
}

u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}