/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <crypto/skcipher.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/pagemap.h>

#include "gss_krb5_internal.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)

static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}
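/*
 * The padding is self-describing: every pad octet holds the pad length,
 * PKCS#5-style, so the receiver can recover it from the last byte
 * alone.  For illustration (hypothetical values, not part of the
 * build): with blocksize 8 and 13 message bytes past @offset,
 * gss_krb5_padding() returns 3 and gss_krb5_add_padding() appends
 * 0x03 0x03 0x03; a length that is already block-aligned gets a full
 * block of padding.  gss_krb5_remove_padding() below reads that last
 * byte back out of the head, the pages, or the tail, wherever the
 * buffer happens to end.
 */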
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */
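/*
 * Wrap token layout built below, per RFC 1964 section 1.2.2 (offsets
 * are relative to @ptr once the generic token framing from
 * g_make_token_header() has been written):
 *
 *	ptr[0..1]	TOK_ID		0x02 0x01 (KG_TOK_WRAP_MSG)
 *	ptr[2..3]	SGN_ALG
 *	ptr[4..5]	SEAL_ALG
 *	ptr[6..7]	Filler		0xff 0xff
 *	ptr[8..15]	SND_SEQ		encrypted sequence number
 *	ptr[16..]	SGN_CKSUM	kctx->gk5e->cksumlength octets
 *	then		a one-block confounder
 *	then		plaintext plus self-describing padding
 */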
u32
gss_krb5_wrap_v1(struct krb5_ctx *kctx, int offset,
		 struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	time64_t		now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;
	u32			conflen = crypto_sync_skcipher_blocksize(kctx->enc);

	dprintk("RPC:       %s\n", __func__);

	now = ktime_get_real_seconds();

	blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	/*
	 * signalg and sealalg are stored as if they were converted from LE
	 * to host endian, even though they're opaque pairs of bytes according
	 * to the RFC.
	 */
	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);
	ptr[6] = 0xff;
	ptr[7] = 0xff;

	krb5_make_confounder(msg_start, conflen);
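	/*
	 * Remaining steps: checksum the token header together with the
	 * confounder, plaintext, and padding; stash the checksum and
	 * the encrypted sequence number in the header; then encrypt
	 * everything from the confounder onward.
	 */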
	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
				cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	seq_send = atomic_fetch_inc(&kctx->seq_send);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf,
				offset + headlen - conflen, pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
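/*
 * Verify and strip a v1 wrap token in place: check the RFC 1964
 * header, decrypt the body, verify the checksum and sequence number,
 * then slide the plaintext back to @offset and remove the krb5
 * padding.  On success, *slack and *align tell the caller how much
 * buffer space the unwrapping recovered.
 */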
u32
gss_krb5_unwrap_v1(struct krb5_ctx *kctx, int offset, int len,
		   struct xdr_buf *buf, unsigned int *slack,
		   unsigned int *align)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	time64_t		now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			conflen = crypto_sync_skcipher_blocksize(kctx->enc);
	int			crypt_offset;
	u8			*cksumkey;
	unsigned int		saved_len = buf->len;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	buf->len = len;
	if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
				cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
				kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = ktime_get_real_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len = len - (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	/* slack must include room for krb5 padding */
	*slack = XDR_QUADLEN(saved_len - buf->len);
	/* The GSS blob always precedes the RPC message payload */
	*align = *slack;
	return GSS_S_COMPLETE;
}

#endif
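/*
 * RFC 4121 wrap tokens may arrive with a non-zero "right rotation
 * count" (RRC): the sender is allowed to rotate the ciphertext right
 * so that the checksum trailer lands at the front of the token.
 * gss_krb5_unwrap_v2() undoes that with the helpers below, which
 * rotate the xdr_buf left again; for example, rotating { a b c d e f }
 * left by 2 yields { c d e f a b }.
 */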
/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i + shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	int shifted = 0;
	int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}
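/*
 * Token header built below, per RFC 4121 section 4.2.6.2; all
 * multi-octet fields are big-endian:
 *
 *	ptr[0..1]	TOK_ID		0x05 0x04 (KG2_TOK_WRAP)
 *	ptr[2]		Flags
 *	ptr[3]		Filler		0xff
 *	ptr[4..5]	EC		extra count
 *	ptr[6..7]	RRC		right rotation count (0 on send)
 *	ptr[8..15]	SND_SEQ		sequence number
 */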
u32
gss_krb5_wrap_v2(struct krb5_ctx *kctx, int offset,
		 struct xdr_buf *buf, struct page **pages)
{
	u8		*ptr;
	time64_t	now;
	u8		flags = 0x00;
	__be16		*be16ptr;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct gss token header */
	ptr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	*be16ptr++ = 0;
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = 0;

	be64ptr = (__be64 *)be16ptr;
	*be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));

	err = (*kctx->gk5e->encrypt)(kctx, offset, buf, pages);
	if (err)
		return err;

	now = ktime_get_real_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
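/*
 * Verify and strip a v2 wrap token: check the header and direction
 * flags, undo any right rotation, decrypt, and compare the decrypted
 * copy of the token header against the cleartext one before moving the
 * plaintext into place and trimming the EC and checksum trailer.
 */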
u32
gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len,
		   struct xdr_buf *buf, unsigned int *slack,
		   unsigned int *align)
{
	time64_t	now;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;

	dprintk("RPC:       %s\n", __func__);

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	/*
	 * NOTE: the sequence number at ptr + 8 is skipped, rpcsec_gss
	 * doesn't want it checked; see page 6 of rfc 2203.
	 */
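	/*
	 * Undo the peer's right rotation.  Only the ciphertext
	 * following the 16-byte token header was rotated, hence the
	 * base of offset + 16 (that is, GSS_KRB5_TOK_HDR_LEN).
	 */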
	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt)(kctx, offset, len, buf,
				     &headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = ktime_get_real_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
	       buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len = len - (GSS_KRB5_TOK_HDR_LEN + headskip);

	/* Trim off the trailing "extra count" and checksum blob */
	xdr_buf_trim(buf, ec + GSS_KRB5_TOK_HDR_LEN + tailskip);

	*align = XDR_QUADLEN(GSS_KRB5_TOK_HDR_LEN + headskip);
	*slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}