/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}
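
/*
 * Note on the padding scheme above: rfc 1964, section 1.2.2.3 calls for
 * one-to-blocksize bytes of pad, each byte holding the pad count, which
 * is why gss_krb5_padding() deliberately returns blocksize (a whole
 * extra block) rather than 0 when the data is already aligned.  For
 * example, with blocksize 8 and 13 bytes of data, 3 bytes of 0x03 are
 * appended; with 16 bytes of data, 8 bytes of 0x08 are appended.
 */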

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}
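
/*
 * The confounder is a block of pseudo-random bytes prepended to the
 * plaintext before encryption so that two identical messages never
 * yield identical ciphertext under the same key; since the rfc 1964
 * ciphers run with a fixed IV, it effectively serves as the
 * per-message IV.
 */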
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = prandom_u32();
		i = (i << 32) | prandom_u32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */
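
/*
 * Layout of the rfc 1964 wrap token built by gss_wrap_kerberos_v1()
 * below, inside the mech-OID framing added by g_make_token_header():
 *
 *	TOK_ID (0x02 0x01)  SGN_ALG (2 octets)  SEAL_ALG (2 octets)
 *	Filler (0xff 0xff)  SND_SEQ (8 octets, encrypted)
 *	SGN_CKSUM (gk5e->cksumlength octets)
 *	confounder | plaintext | pad	(all encrypted)
 *
 * GSS_KRB5_TOK_HDR_LEN covers the 16 octets through SND_SEQ.
 */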
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;
	u32			conflen = kctx->gk5e->conflen;

	dprintk("RPC:       %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/*
	 * signalg and sealalg are stored as if they were converted from LE
	 * to host endian, even though they're opaque pairs of bytes according
	 * to the RFC.
	 */
	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
	ptr[6] = 0xff;
	ptr[7] = 0xff;

	gss_krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;
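
	/*
	 * arcfour-hmac derives a fresh encryption key from the sequence
	 * number for every token (see krb5_rc4_setup_enc_key() and
	 * rfc 4757), so it gets a one-off cipher handle here; every other
	 * v1 enctype encrypts with the long-lived context key kctx->enc.
	 * The unwrap path below mirrors this split.
	 */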
	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

		err = gss_encrypt_xdr_buf(cipher, buf,
					  offset + headlen - conflen, pages);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_FAILURE;
	} else {
		if (gss_encrypt_xdr_buf(kctx->enc, buf,
					offset + headlen - conflen, pages))
			return GSS_S_FAILURE;
	}

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
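
/*
 * The inverse of gss_wrap_kerberos_v1(): sanity-check the token header,
 * recover and check the sequence number, decrypt in place, verify the
 * checksum, confirm the context is unexpired, then slide the payload
 * down over the header and confounder and strip the pad bytes.
 */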
static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			conflen = kctx->gk5e->conflen;
	int			crypt_offset;
	u8			*cksumkey;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	/*
	 * Need plaintext seqnum to derive encryption key for arcfour-hmac
	 */
	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

		err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_DEFECTIVE_TOKEN;
	} else {
		if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
			return GSS_S_DEFECTIVE_TOKEN;
	}

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
					kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}
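
/*
 * The three helpers below rotate the contents of an xdr_buf left, in
 * place and without allocating: rotating |a b c d e| left by two gives
 * |c d e a b|.  Unwrapping an rfc 4121 v2 token needs this to undo the
 * sender's "right rotation count" (RRC).
 */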

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}
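
/*
 * Layout of the 16-octet rfc 4121 (v2) wrap token header built by
 * gss_wrap_kerberos_v2() below:
 *
 *	TOK_ID (0x05 0x04)  Flags (1 octet)  Filler (0xff)
 *	EC "extra count" (2 octets, big-endian)
 *	RRC "right rotation count" (2 octets, big-endian)
 *	SND_SEQ (8 octets, big-endian)
 *
 * For sealed tokens the ciphertext, which ends with an encrypted copy
 * of this header, follows immediately.
 */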
static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	int		blocksize;
	u8		*ptr, *plainhdr;
	s32		now;
	u8		flags = 0x00;
	__be16		*be16ptr;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
	*be16ptr++ = 0;
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	spin_lock(&krb5_seq_lock);
	*be64ptr = cpu_to_be64(kctx->seq_send64++);
	spin_unlock(&krb5_seq_lock);

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
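
/*
 * Unwrap undoes any RRC rotation, decrypts, and then compares the copy
 * of the token header that rfc 4121 requires to be appended to the
 * plaintext (and thus carried under the encryption) against the header
 * actually received, so a tampered header is caught.
 */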
static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32		now;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
	 * doesn't want it checked; see page 6 of rfc 2203.
	 */
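
	/*
	 * The sender may have right-rotated the ciphertext by rrc octets
	 * so that the trailing checksum lands where its buffers allow
	 * (rfc 4121, section 4.2.5); a left rotation of everything past
	 * the 16-octet header puts it back.
	 */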
	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	/* Trim off the trailing "extra count" and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}
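
/*
 * Entry points for the krb5 mechanism's wrap/unwrap operations.  The
 * context's enctype selects the token format: the "raw" DES/DES3 and
 * arcfour-hmac enctypes use the rfc 1964-style v1 tokens, the AES
 * enctypes the rfc 4121 v2 tokens.
 */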
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}