1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Neil Brown <neilb@cse.unsw.edu.au>
4  * J. Bruce Fields <bfields@umich.edu>
5  * Andy Adamson <andros@umich.edu>
6  * Dug Song <dugsong@monkey.org>
7  *
8  * RPCSEC_GSS server authentication.
9  * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
10  * (gssapi)
11  *
12  * RPCSEC_GSS involves three stages:
13  *  1/ context creation
14  *  2/ data exchange
15  *  3/ context destruction
16  *
17  * Context creation is handled largely by upcalls to user-space.
18  *  In particular, GSS_Accept_sec_context is handled by an upcall.
19  * Data exchange is handled entirely within the kernel.
20  *  In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal and GSS_Unseal are in-kernel.
21  * Context destruction is handled in-kernel.
22  *  GSS_Delete_sec_context is in-kernel.
23  *
24  * Context creation is initiated by an RPCSEC_GSS_INIT request arriving.
25  * The context handle and gss_token are used as a key into the rpcsec_init cache.
26  * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
27  * namely major_status, minor_status, context_handle and reply_token.
28  * These are sent back to the client.
29  * Sequence window management is handled by the kernel.  The window size is currently
30  * a compile-time constant.
31  *
32  * When user-space is happy that a context is established, it places an entry
33  * in the rpcsec_context cache. The key for this cache is the context_handle.
34  * The content includes:
35  *   uid/gidlist - for determining access rights
36  *   mechanism type
37  *   mechanism specific information, such as a key
38  *
39  */
40 
41 #include <linux/slab.h>
42 #include <linux/types.h>
43 #include <linux/module.h>
44 #include <linux/pagemap.h>
45 #include <linux/user_namespace.h>
46 
47 #include <linux/sunrpc/auth_gss.h>
48 #include <linux/sunrpc/gss_err.h>
49 #include <linux/sunrpc/svcauth.h>
50 #include <linux/sunrpc/svcauth_gss.h>
51 #include <linux/sunrpc/cache.h>
52 #include <linux/sunrpc/gss_krb5.h>
53 
54 #include <trace/events/rpcgss.h>
55 
56 #include "gss_rpc_upcall.h"
57 
58 /*
59  * Unfortunately there isn't a maximum checksum size exported via the
60  * GSS API. Manufacture one based on GSS mechanisms supported by this
61  * implementation.
62  */
63 #define GSS_MAX_CKSUMSIZE (GSS_KRB5_TOK_HDR_LEN + GSS_KRB5_MAX_CKSUM_LEN)
64 
65 /*
66  * This value may be increased in the future to accommodate other
67  * usage of the scratch buffer.
68  */
69 #define GSS_SCRATCH_SIZE GSS_MAX_CKSUMSIZE
70 
71 struct gss_svc_data {
72 	/* decoded gss client cred: */
73 	struct rpc_gss_wire_cred	clcred;
74 	/* save a pointer to the beginning of the encoded verifier,
75 	 * for use in encryption/checksumming in svcauth_gss_release: */
76 	__be32				*verf_start;
77 	struct rsc			*rsci;
78 
79 	/* for temporary results */
80 	__be32				gsd_seq_num;
81 	u8				gsd_scratch[GSS_SCRATCH_SIZE];
82 };
83 
84 /* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
85  * into replies.
86  *
87  * Key is context handle (\x if empty) and gss_token.
88  * Content is major_status, minor_status (integers), context_handle and reply_token.
89  *
90  */
91 
92 static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
93 {
94 	return a->len == b->len && 0 == memcmp(a->data, b->data, a->len);
95 }
96 
97 #define	RSI_HASHBITS	6
98 #define	RSI_HASHMAX	(1<<RSI_HASHBITS)
99 
100 struct rsi {
101 	struct cache_head	h;
102 	struct xdr_netobj	in_handle, in_token;
103 	struct xdr_netobj	out_handle, out_token;
104 	int			major_status, minor_status;
105 	struct rcu_head		rcu_head;
106 };
107 
108 static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
109 static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item);
110 
111 static void rsi_free(struct rsi *rsii)
112 {
113 	kfree(rsii->in_handle.data);
114 	kfree(rsii->in_token.data);
115 	kfree(rsii->out_handle.data);
116 	kfree(rsii->out_token.data);
117 }
118 
119 static void rsi_free_rcu(struct rcu_head *head)
120 {
121 	struct rsi *rsii = container_of(head, struct rsi, rcu_head);
122 
123 	rsi_free(rsii);
124 	kfree(rsii);
125 }
126 
127 static void rsi_put(struct kref *ref)
128 {
129 	struct rsi *rsii = container_of(ref, struct rsi, h.ref);
130 
131 	call_rcu(&rsii->rcu_head, rsi_free_rcu);
132 }
133 
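/*
 * rsi entries are keyed on both the client-supplied context handle and
 * the GSS token (see rsi_match below), so the hash mixes both fields.
 */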
134 static inline int rsi_hash(struct rsi *item)
135 {
136 	return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
137 	     ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
138 }
139 
140 static int rsi_match(struct cache_head *a, struct cache_head *b)
141 {
142 	struct rsi *item = container_of(a, struct rsi, h);
143 	struct rsi *tmp = container_of(b, struct rsi, h);
144 	return netobj_equal(&item->in_handle, &tmp->in_handle) &&
145 	       netobj_equal(&item->in_token, &tmp->in_token);
146 }
147 
148 static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
149 {
150 	dst->len = len;
151 	dst->data = (len ? kmemdup(src, len, GFP_KERNEL) : NULL);
152 	if (len && !dst->data)
153 		return -ENOMEM;
154 	return 0;
155 }
156 
157 static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
158 {
159 	return dup_to_netobj(dst, src->data, src->len);
160 }
161 
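/*
 * Cache "init" method: transplant the lookup key (in_handle, in_token)
 * from the temporary key item into the newly allocated entry rather
 * than copying it, and start with empty upcall results.
 */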
162 static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
163 {
164 	struct rsi *new = container_of(cnew, struct rsi, h);
165 	struct rsi *item = container_of(citem, struct rsi, h);
166 
167 	new->out_handle.data = NULL;
168 	new->out_handle.len = 0;
169 	new->out_token.data = NULL;
170 	new->out_token.len = 0;
171 	new->in_handle.len = item->in_handle.len;
172 	item->in_handle.len = 0;
173 	new->in_token.len = item->in_token.len;
174 	item->in_token.len = 0;
175 	new->in_handle.data = item->in_handle.data;
176 	item->in_handle.data = NULL;
177 	new->in_token.data = item->in_token.data;
178 	item->in_token.data = NULL;
179 }
180 
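/*
 * Cache "update" method: move the upcall results (out_handle, out_token
 * and the GSS major/minor status) from the parsed item into the cached
 * entry.
 */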
181 static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
182 {
183 	struct rsi *new = container_of(cnew, struct rsi, h);
184 	struct rsi *item = container_of(citem, struct rsi, h);
185 
186 	BUG_ON(new->out_handle.data || new->out_token.data);
187 	new->out_handle.len = item->out_handle.len;
188 	item->out_handle.len = 0;
189 	new->out_token.len = item->out_token.len;
190 	item->out_token.len = 0;
191 	new->out_handle.data = item->out_handle.data;
192 	item->out_handle.data = NULL;
193 	new->out_token.data = item->out_token.data;
194 	item->out_token.data = NULL;
195 
196 	new->major_status = item->major_status;
197 	new->minor_status = item->minor_status;
198 }
199 
200 static struct cache_head *rsi_alloc(void)
201 {
202 	struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);
203 	if (rsii)
204 		return &rsii->h;
205 	else
206 		return NULL;
207 }
208 
209 static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
210 {
211 	return sunrpc_cache_pipe_upcall_timeout(cd, h);
212 }
213 
214 static void rsi_request(struct cache_detail *cd,
215 		       struct cache_head *h,
216 		       char **bpp, int *blen)
217 {
218 	struct rsi *rsii = container_of(h, struct rsi, h);
219 
220 	qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
221 	qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
222 	(*bpp)[-1] = '\n';
223 	WARN_ONCE(*blen < 0,
224 		  "RPCSEC/GSS credential too large - please use gssproxy\n");
225 }
226 
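/*
 * Parse one downcall line written into the "auth.rpcsec.init" cache
 * channel by the userspace daemon (typically rpc.svcgssd); the fields
 * are space-separated qwords, with binary values hex-encoded.
 */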
227 static int rsi_parse(struct cache_detail *cd,
228 		    char *mesg, int mlen)
229 {
230 	/* in_handle in_token expiry major_status minor_status out_handle out_token */
231 	char *buf = mesg;
232 	char *ep;
233 	int len;
234 	struct rsi rsii, *rsip = NULL;
235 	time64_t expiry;
236 	int status = -EINVAL;
237 
238 	memset(&rsii, 0, sizeof(rsii));
239 	/* handle */
240 	len = qword_get(&mesg, buf, mlen);
241 	if (len < 0)
242 		goto out;
243 	status = -ENOMEM;
244 	if (dup_to_netobj(&rsii.in_handle, buf, len))
245 		goto out;
246 
247 	/* token */
248 	len = qword_get(&mesg, buf, mlen);
249 	status = -EINVAL;
250 	if (len < 0)
251 		goto out;
252 	status = -ENOMEM;
253 	if (dup_to_netobj(&rsii.in_token, buf, len))
254 		goto out;
255 
256 	rsip = rsi_lookup(cd, &rsii);
257 	if (!rsip)
258 		goto out;
259 
260 	rsii.h.flags = 0;
261 	/* expiry */
262 	expiry = get_expiry(&mesg);
263 	status = -EINVAL;
264 	if (expiry == 0)
265 		goto out;
266 
267 	/* major/minor */
268 	len = qword_get(&mesg, buf, mlen);
269 	if (len <= 0)
270 		goto out;
271 	rsii.major_status = simple_strtoul(buf, &ep, 10);
272 	if (*ep)
273 		goto out;
274 	len = qword_get(&mesg, buf, mlen);
275 	if (len <= 0)
276 		goto out;
277 	rsii.minor_status = simple_strtoul(buf, &ep, 10);
278 	if (*ep)
279 		goto out;
280 
281 	/* out_handle */
282 	len = qword_get(&mesg, buf, mlen);
283 	if (len < 0)
284 		goto out;
285 	status = -ENOMEM;
286 	if (dup_to_netobj(&rsii.out_handle, buf, len))
287 		goto out;
288 
289 	/* out_token */
290 	len = qword_get(&mesg, buf, mlen);
291 	status = -EINVAL;
292 	if (len < 0)
293 		goto out;
294 	status = -ENOMEM;
295 	if (dup_to_netobj(&rsii.out_token, buf, len))
296 		goto out;
297 	rsii.h.expiry_time = expiry;
298 	rsip = rsi_update(cd, &rsii, rsip);
299 	status = 0;
300 out:
301 	rsi_free(&rsii);
302 	if (rsip)
303 		cache_put(&rsip->h, cd);
304 	else
305 		status = -ENOMEM;
306 	return status;
307 }
308 
309 static const struct cache_detail rsi_cache_template = {
310 	.owner		= THIS_MODULE,
311 	.hash_size	= RSI_HASHMAX,
312 	.name           = "auth.rpcsec.init",
313 	.cache_put      = rsi_put,
314 	.cache_upcall	= rsi_upcall,
315 	.cache_request  = rsi_request,
316 	.cache_parse    = rsi_parse,
317 	.match		= rsi_match,
318 	.init		= rsi_init,
319 	.update		= update_rsi,
320 	.alloc		= rsi_alloc,
321 };
322 
323 static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
324 {
325 	struct cache_head *ch;
326 	int hash = rsi_hash(item);
327 
328 	ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
329 	if (ch)
330 		return container_of(ch, struct rsi, h);
331 	else
332 		return NULL;
333 }
334 
335 static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old)
336 {
337 	struct cache_head *ch;
338 	int hash = rsi_hash(new);
339 
340 	ch = sunrpc_cache_update(cd, &new->h,
341 				 &old->h, hash);
342 	if (ch)
343 		return container_of(ch, struct rsi, h);
344 	else
345 		return NULL;
346 }
347 
348 
349 /*
350  * The rpcsec_context cache is used to store a context that is
351  * used in data exchange.
352  * The key is a context handle. The content is:
353  *  uid, gidlist, mechanism, service-set, mech-specific-data
354  */
355 
356 #define	RSC_HASHBITS	10
357 #define	RSC_HASHMAX	(1<<RSC_HASHBITS)
358 
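/*
 * Size of the RPCSEC_GSS sequence number window. This value is also
 * returned to the client as the seq_window field of the
 * RPC_GSS_PROC_INIT reply (RFC 2203, Section 5.2.3.1).
 */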
359 #define GSS_SEQ_WIN	128
360 
361 struct gss_svc_seq_data {
362 	/* highest seq number seen so far: */
363 	u32			sd_max;
364 	/* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, bit (i % GSS_SEQ_WIN) of
365 	 * sd_win is nonzero iff sequence number i has been seen already: */
366 	unsigned long		sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
367 	spinlock_t		sd_lock;
368 };
369 
370 struct rsc {
371 	struct cache_head	h;
372 	struct xdr_netobj	handle;
373 	struct svc_cred		cred;
374 	struct gss_svc_seq_data	seqdata;
375 	struct gss_ctx		*mechctx;
376 	struct rcu_head		rcu_head;
377 };
378 
379 static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
380 static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item);
381 
382 static void rsc_free(struct rsc *rsci)
383 {
384 	kfree(rsci->handle.data);
385 	if (rsci->mechctx)
386 		gss_delete_sec_context(&rsci->mechctx);
387 	free_svc_cred(&rsci->cred);
388 }
389 
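/*
 * RCU lookups (rsc_match) examine only the handle, so it must remain
 * valid until a grace period has elapsed; the GSS mech context and the
 * creds are torn down immediately in rsc_put() below.
 */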
390 static void rsc_free_rcu(struct rcu_head *head)
391 {
392 	struct rsc *rsci = container_of(head, struct rsc, rcu_head);
393 
394 	kfree(rsci->handle.data);
395 	kfree(rsci);
396 }
397 
398 static void rsc_put(struct kref *ref)
399 {
400 	struct rsc *rsci = container_of(ref, struct rsc, h.ref);
401 
402 	if (rsci->mechctx)
403 		gss_delete_sec_context(&rsci->mechctx);
404 	free_svc_cred(&rsci->cred);
405 	call_rcu(&rsci->rcu_head, rsc_free_rcu);
406 }
407 
408 static inline int
409 rsc_hash(struct rsc *rsci)
410 {
411 	return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
412 }
413 
414 static int
415 rsc_match(struct cache_head *a, struct cache_head *b)
416 {
417 	struct rsc *new = container_of(a, struct rsc, h);
418 	struct rsc *tmp = container_of(b, struct rsc, h);
419 
420 	return netobj_equal(&new->handle, &tmp->handle);
421 }
422 
423 static void
424 rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
425 {
426 	struct rsc *new = container_of(cnew, struct rsc, h);
427 	struct rsc *tmp = container_of(ctmp, struct rsc, h);
428 
429 	new->handle.len = tmp->handle.len;
430 	tmp->handle.len = 0;
431 	new->handle.data = tmp->handle.data;
432 	tmp->handle.data = NULL;
433 	new->mechctx = NULL;
434 	init_svc_cred(&new->cred);
435 }
436 
437 static void
438 update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
439 {
440 	struct rsc *new = container_of(cnew, struct rsc, h);
441 	struct rsc *tmp = container_of(ctmp, struct rsc, h);
442 
443 	new->mechctx = tmp->mechctx;
444 	tmp->mechctx = NULL;
445 	memset(&new->seqdata, 0, sizeof(new->seqdata));
446 	spin_lock_init(&new->seqdata.sd_lock);
447 	new->cred = tmp->cred;
448 	init_svc_cred(&tmp->cred);
449 }
450 
451 static struct cache_head *
452 rsc_alloc(void)
453 {
454 	struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL);
455 	if (rsci)
456 		return &rsci->h;
457 	else
458 		return NULL;
459 }
460 
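/*
 * The rsc cache never makes upcalls to userspace: entries are populated
 * only by downcalls (rsc_parse) or by gss_proxy_save_rsc(), so refuse
 * any attempt to queue an upcall.
 */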
461 static int rsc_upcall(struct cache_detail *cd, struct cache_head *h)
462 {
463 	return -EINVAL;
464 }
465 
466 static int rsc_parse(struct cache_detail *cd,
467 		     char *mesg, int mlen)
468 {
469 	/* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
470 	char *buf = mesg;
471 	int id;
472 	int len, rv;
473 	struct rsc rsci, *rscp = NULL;
474 	time64_t expiry;
475 	int status = -EINVAL;
476 	struct gss_api_mech *gm = NULL;
477 
478 	memset(&rsci, 0, sizeof(rsci));
479 	/* context handle */
480 	len = qword_get(&mesg, buf, mlen);
481 	if (len < 0) goto out;
482 	status = -ENOMEM;
483 	if (dup_to_netobj(&rsci.handle, buf, len))
484 		goto out;
485 
486 	rsci.h.flags = 0;
487 	/* expiry */
488 	expiry = get_expiry(&mesg);
489 	status = -EINVAL;
490 	if (expiry == 0)
491 		goto out;
492 
493 	rscp = rsc_lookup(cd, &rsci);
494 	if (!rscp)
495 		goto out;
496 
497 	/* uid, or NEGATIVE */
498 	rv = get_int(&mesg, &id);
499 	if (rv == -EINVAL)
500 		goto out;
501 	if (rv == -ENOENT)
502 		set_bit(CACHE_NEGATIVE, &rsci.h.flags);
503 	else {
504 		int N, i;
505 
506 		/*
507 		 * NOTE: we skip uid_valid()/gid_valid() checks here:
508 		 * instead, -1 ids are later mapped to the
509 		 * (export-specific) anonymous id by nfsd_setuser.
510 		 *
511 		 * (But supplementary gid's get no such special
512 		 * treatment so are checked for validity here.)
513 		 */
514 		/* uid */
515 		rsci.cred.cr_uid = make_kuid(current_user_ns(), id);
516 
517 		/* gid */
518 		if (get_int(&mesg, &id))
519 			goto out;
520 		rsci.cred.cr_gid = make_kgid(current_user_ns(), id);
521 
522 		/* number of additional gid's */
523 		if (get_int(&mesg, &N))
524 			goto out;
525 		if (N < 0 || N > NGROUPS_MAX)
526 			goto out;
527 		status = -ENOMEM;
528 		rsci.cred.cr_group_info = groups_alloc(N);
529 		if (rsci.cred.cr_group_info == NULL)
530 			goto out;
531 
532 		/* gid's */
533 		status = -EINVAL;
534 		for (i = 0; i < N; i++) {
535 			kgid_t kgid;
536 			if (get_int(&mesg, &id))
537 				goto out;
538 			kgid = make_kgid(current_user_ns(), id);
539 			if (!gid_valid(kgid))
540 				goto out;
541 			rsci.cred.cr_group_info->gid[i] = kgid;
542 		}
543 		groups_sort(rsci.cred.cr_group_info);
544 
545 		/* mech name */
546 		len = qword_get(&mesg, buf, mlen);
547 		if (len < 0)
548 			goto out;
549 		gm = rsci.cred.cr_gss_mech = gss_mech_get_by_name(buf);
550 		status = -EOPNOTSUPP;
551 		if (!gm)
552 			goto out;
553 
554 		status = -EINVAL;
555 		/* mech-specific data: */
556 		len = qword_get(&mesg, buf, mlen);
557 		if (len < 0)
558 			goto out;
559 		status = gss_import_sec_context(buf, len, gm, &rsci.mechctx,
560 						NULL, GFP_KERNEL);
561 		if (status)
562 			goto out;
563 
564 		/* get client name */
565 		len = qword_get(&mesg, buf, mlen);
566 		if (len > 0) {
567 			rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
568 			if (!rsci.cred.cr_principal) {
569 				status = -ENOMEM;
570 				goto out;
571 			}
572 		}
573 
574 	}
575 	rsci.h.expiry_time = expiry;
576 	rscp = rsc_update(cd, &rsci, rscp);
577 	status = 0;
578 out:
579 	rsc_free(&rsci);
580 	if (rscp)
581 		cache_put(&rscp->h, cd);
582 	else
583 		status = -ENOMEM;
584 	return status;
585 }
586 
587 static const struct cache_detail rsc_cache_template = {
588 	.owner		= THIS_MODULE,
589 	.hash_size	= RSC_HASHMAX,
590 	.name		= "auth.rpcsec.context",
591 	.cache_put	= rsc_put,
592 	.cache_upcall	= rsc_upcall,
593 	.cache_parse	= rsc_parse,
594 	.match		= rsc_match,
595 	.init		= rsc_init,
596 	.update		= update_rsc,
597 	.alloc		= rsc_alloc,
598 };
599 
600 static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
601 {
602 	struct cache_head *ch;
603 	int hash = rsc_hash(item);
604 
605 	ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
606 	if (ch)
607 		return container_of(ch, struct rsc, h);
608 	else
609 		return NULL;
610 }
611 
612 static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old)
613 {
614 	struct cache_head *ch;
615 	int hash = rsc_hash(new);
616 
617 	ch = sunrpc_cache_update(cd, &new->h,
618 				 &old->h, hash);
619 	if (ch)
620 		return container_of(ch, struct rsc, h);
621 	else
622 		return NULL;
623 }
624 
625 
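/*
 * Look up an established GSS context by its handle and verify that the
 * cache entry is still valid. Returns a referenced rsc entry, or NULL
 * if no entry is found or it has expired.
 */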
626 static struct rsc *
627 gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
628 {
629 	struct rsc rsci;
630 	struct rsc *found;
631 
632 	memset(&rsci, 0, sizeof(rsci));
633 	if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
634 		return NULL;
635 	found = rsc_lookup(cd, &rsci);
636 	rsc_free(&rsci);
637 	if (!found)
638 		return NULL;
639 	if (cache_check(cd, &found->h, NULL))
640 		return NULL;
641 	return found;
642 }
643 
644 /**
645  * gss_check_seq_num - GSS sequence number window check
646  * @rqstp: RPC Call to use when reporting errors
647  * @rsci: cached GSS context state (updated on return)
648  * @seq_num: sequence number to check
649  *
650  * Implements sequence number algorithm as specified in
651  * RFC 2203, Section 5.3.3.1. "Context Management".
652  *
653  * Return values:
654  *   %true: @rqstp's GSS sequence number is inside the window
655  *   %false: @rqstp's GSS sequence number is outside the window
656  */
657 static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
658 			      u32 seq_num)
659 {
660 	struct gss_svc_seq_data *sd = &rsci->seqdata;
661 	bool result = false;
662 
663 	spin_lock(&sd->sd_lock);
664 	if (seq_num > sd->sd_max) {
665 		if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
666 			memset(sd->sd_win, 0, sizeof(sd->sd_win));
667 			sd->sd_max = seq_num;
668 		} else while (sd->sd_max < seq_num) {
669 			sd->sd_max++;
670 			__clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
671 		}
672 		__set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
673 		goto ok;
674 	} else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
675 		goto toolow;
676 	}
677 	if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
678 		goto alreadyseen;
679 
680 ok:
681 	result = true;
682 out:
683 	spin_unlock(&sd->sd_lock);
684 	return result;
685 
686 toolow:
687 	trace_rpcgss_svc_seqno_low(rqstp, seq_num,
688 				   sd->sd_max - GSS_SEQ_WIN,
689 				   sd->sd_max);
690 	goto out;
691 alreadyseen:
692 	trace_rpcgss_svc_seqno_seen(rqstp, seq_num);
693 	goto out;
694 }
695 
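/* XDR encodes opaque data in four-octet units; round a length up accordingly. */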
696 static inline u32 round_up_to_quad(u32 i)
697 {
698 	return (i + 3) & ~3;
699 }
700 
701 static inline int
702 svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
703 {
704 	u8 *p;
705 
706 	if (resv->iov_len + 4 > PAGE_SIZE)
707 		return -1;
708 	svc_putnl(resv, o->len);
709 	p = resv->iov_base + resv->iov_len;
710 	resv->iov_len += round_up_to_quad(o->len);
711 	if (resv->iov_len > PAGE_SIZE)
712 		return -1;
713 	memcpy(p, o->data, o->len);
714 	memset(p + o->len, 0, round_up_to_quad(o->len) - o->len);
715 	return 0;
716 }
717 
718 /*
719  * Decode and verify a Call's verifier field. For RPC_AUTH_GSS Calls,
720  * the body of this field contains a variable length checksum.
721  *
722  * GSS-specific auth_stat values are mandated by RFC 2203 Section
723  * 5.3.3.3.
724  */
725 static int
726 svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
727 			  __be32 *rpcstart, struct rpc_gss_wire_cred *gc)
728 {
729 	struct xdr_stream	*xdr = &rqstp->rq_arg_stream;
730 	struct gss_ctx		*ctx_id = rsci->mechctx;
731 	u32			flavor, maj_stat;
732 	struct xdr_buf		rpchdr;
733 	struct xdr_netobj	checksum;
734 	struct kvec		iov;
735 
736 	/*
737 	 * Compute the checksum of the incoming Call from the
738 	 * XID field to credential field:
739 	 */
740 	iov.iov_base = rpcstart;
741 	iov.iov_len = (u8 *)xdr->p - (u8 *)rpcstart;
742 	xdr_buf_from_iov(&iov, &rpchdr);
743 
744 	/* Call's verf field: */
745 	if (xdr_stream_decode_opaque_auth(xdr, &flavor,
746 					  (void **)&checksum.data,
747 					  &checksum.len) < 0) {
748 		rqstp->rq_auth_stat = rpc_autherr_badverf;
749 		return SVC_DENIED;
750 	}
751 	if (flavor != RPC_AUTH_GSS) {
752 		rqstp->rq_auth_stat = rpc_autherr_badverf;
753 		return SVC_DENIED;
754 	}
755 
756 	if (rqstp->rq_deferred)
757 		return SVC_OK;
758 	maj_stat = gss_verify_mic(ctx_id, &rpchdr, &checksum);
759 	if (maj_stat != GSS_S_COMPLETE) {
760 		trace_rpcgss_svc_mic(rqstp, maj_stat);
761 		rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
762 		return SVC_DENIED;
763 	}
764 
765 	if (gc->gc_seq > MAXSEQ) {
766 		trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq);
767 		rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
768 		return SVC_DENIED;
769 	}
770 	if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq))
771 		return SVC_DROP;
772 	return SVC_OK;
773 }
774 
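/*
 * Construct an RPC_AUTH_GSS verifier and write it directly into
 * rq_res.head. The verifier body is a MIC computed over the
 * XDR-encoded GSS sequence number.
 */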
775 static int
776 gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
777 {
778 	__be32			*xdr_seq;
779 	u32			maj_stat;
780 	struct xdr_buf		verf_data;
781 	struct xdr_netobj	mic;
782 	__be32			*p;
783 	struct kvec		iov;
784 	int err = -1;
785 
786 	svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
787 	xdr_seq = kmalloc(4, GFP_KERNEL);
788 	if (!xdr_seq)
789 		return -ENOMEM;
790 	*xdr_seq = htonl(seq);
791 
792 	iov.iov_base = xdr_seq;
793 	iov.iov_len = 4;
794 	xdr_buf_from_iov(&iov, &verf_data);
795 	p = rqstp->rq_res.head->iov_base + rqstp->rq_res.head->iov_len;
796 	mic.data = (u8 *)(p + 1);
797 	maj_stat = gss_get_mic(ctx_id, &verf_data, &mic);
798 	if (maj_stat != GSS_S_COMPLETE)
799 		goto out;
800 	*p++ = htonl(mic.len);
801 	memset((u8 *)p + mic.len, 0, round_up_to_quad(mic.len) - mic.len);
802 	p += XDR_QUADLEN(mic.len);
803 	if (!xdr_ressize_check(rqstp, p))
804 		goto out;
805 	err = 0;
806 out:
807 	kfree(xdr_seq);
808 	return err;
809 }
810 
811 /*
812  * Construct and encode a Reply's verifier field. The verifier's body
813  * field contains a variable-length checksum of the GSS sequence
814  * number.
815  */
816 static bool
817 svcauth_gss_encode_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
818 {
819 	struct gss_svc_data	*gsd = rqstp->rq_auth_data;
820 	u32			maj_stat;
821 	struct xdr_buf		verf_data;
822 	struct xdr_netobj	checksum;
823 	struct kvec		iov;
824 
825 	gsd->gsd_seq_num = cpu_to_be32(seq);
826 	iov.iov_base = &gsd->gsd_seq_num;
827 	iov.iov_len = XDR_UNIT;
828 	xdr_buf_from_iov(&iov, &verf_data);
829 
830 	checksum.data = gsd->gsd_scratch;
831 	maj_stat = gss_get_mic(ctx_id, &verf_data, &checksum);
832 	if (maj_stat != GSS_S_COMPLETE)
833 		goto bad_mic;
834 
835 	return xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream, RPC_AUTH_GSS,
836 					     checksum.data, checksum.len) > 0;
837 
838 bad_mic:
839 	trace_rpcgss_svc_get_mic(rqstp, maj_stat);
840 	return false;
841 }
842 
843 struct gss_domain {
844 	struct auth_domain	h;
845 	u32			pseudoflavor;
846 };
847 
848 static struct auth_domain *
849 find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
850 {
851 	char *name;
852 
853 	name = gss_service_to_auth_domain_name(ctx->mech_type, svc);
854 	if (!name)
855 		return NULL;
856 	return auth_domain_find(name);
857 }
858 
859 static struct auth_ops svcauthops_gss;
860 
861 u32 svcauth_gss_flavor(struct auth_domain *dom)
862 {
863 	struct gss_domain *gd = container_of(dom, struct gss_domain, h);
864 
865 	return gd->pseudoflavor;
866 }
867 
868 EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
869 
870 struct auth_domain *
871 svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char *name)
872 {
873 	struct gss_domain	*new;
874 	struct auth_domain	*test;
875 	int			stat = -ENOMEM;
876 
877 	new = kmalloc(sizeof(*new), GFP_KERNEL);
878 	if (!new)
879 		goto out;
880 	kref_init(&new->h.ref);
881 	new->h.name = kstrdup(name, GFP_KERNEL);
882 	if (!new->h.name)
883 		goto out_free_dom;
884 	new->h.flavour = &svcauthops_gss;
885 	new->pseudoflavor = pseudoflavor;
886 
887 	test = auth_domain_lookup(name, &new->h);
888 	if (test != &new->h) {
889 		pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
890 			name);
891 		stat = -EADDRINUSE;
892 		auth_domain_put(test);
893 		goto out_free_name;
894 	}
895 	return test;
896 
897 out_free_name:
898 	kfree(new->h.name);
899 out_free_dom:
900 	kfree(new);
901 out:
902 	return ERR_PTR(stat);
903 }
904 EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
905 
906 /*
907  * RFC 2203, Section 5.3.2.2
908  *
909  *	struct rpc_gss_integ_data {
910  *		opaque databody_integ<>;
911  *		opaque checksum<>;
912  *	};
913  *
914  *	struct rpc_gss_data_t {
915  *		unsigned int seq_num;
916  *		proc_req_arg_t arg;
917  *	};
918  */
919 static noinline_for_stack int
920 svcauth_gss_unwrap_integ(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
921 {
922 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
923 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
924 	u32 len, offset, seq_num, maj_stat;
925 	struct xdr_buf *buf = xdr->buf;
926 	struct xdr_buf databody_integ;
927 	struct xdr_netobj checksum;
928 
929 	/* NFS READ normally uses splice to send data in-place. However
930 	 * the data in cache can change after the reply's MIC is computed
931 	 * but before the RPC reply is sent. To prevent the client from
932 	 * rejecting the server-computed MIC in this somewhat rare case,
933 	 * do not use splice with the GSS integrity service.
934 	 */
935 	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
936 
937 	/* Did we already verify the signature on the original pass through? */
938 	if (rqstp->rq_deferred)
939 		return 0;
940 
941 	if (xdr_stream_decode_u32(xdr, &len) < 0)
942 		goto unwrap_failed;
943 	if (len & 3)
944 		goto unwrap_failed;
945 	offset = xdr_stream_pos(xdr);
946 	if (xdr_buf_subsegment(buf, &databody_integ, offset, len))
947 		goto unwrap_failed;
948 
949 	/*
950 	 * The xdr_stream now points to the @seq_num field. The next
951 	 * XDR data item is the @arg field, which contains the clear
952 	 * text RPC program payload. The checksum, which follows the
953 	 * @arg field, is located and decoded without updating the
954 	 * xdr_stream.
955 	 */
956 
957 	offset += len;
958 	if (xdr_decode_word(buf, offset, &checksum.len))
959 		goto unwrap_failed;
960 	if (checksum.len > sizeof(gsd->gsd_scratch))
961 		goto unwrap_failed;
962 	checksum.data = gsd->gsd_scratch;
963 	if (read_bytes_from_xdr_buf(buf, offset + XDR_UNIT, checksum.data,
964 				    checksum.len))
965 		goto unwrap_failed;
966 
967 	maj_stat = gss_verify_mic(ctx, &databody_integ, &checksum);
968 	if (maj_stat != GSS_S_COMPLETE)
969 		goto bad_mic;
970 
971 	/* The received seqno is protected by the checksum. */
972 	if (xdr_stream_decode_u32(xdr, &seq_num) < 0)
973 		goto unwrap_failed;
974 	if (seq_num != seq)
975 		goto bad_seqno;
976 
977 	xdr_truncate_decode(xdr, XDR_UNIT + checksum.len);
978 	return 0;
979 
980 unwrap_failed:
981 	trace_rpcgss_svc_unwrap_failed(rqstp);
982 	return -EINVAL;
983 bad_seqno:
984 	trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num);
985 	return -EINVAL;
986 bad_mic:
987 	trace_rpcgss_svc_mic(rqstp, maj_stat);
988 	return -EINVAL;
989 }
990 
991 /*
992  * RFC 2203, Section 5.3.2.3
993  *
994  *	struct rpc_gss_priv_data {
995  *		opaque databody_priv<>
996  *	};
997  *
998  *	struct rpc_gss_data_t {
999  *		unsigned int seq_num;
1000  *		proc_req_arg_t arg;
1001  *	};
1002  */
1003 static noinline_for_stack int
1004 svcauth_gss_unwrap_priv(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
1005 {
1006 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1007 	u32 len, maj_stat, seq_num, offset;
1008 	struct xdr_buf *buf = xdr->buf;
1009 	unsigned int saved_len;
1010 
1011 	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
1012 
1013 	if (xdr_stream_decode_u32(xdr, &len) < 0)
1014 		goto unwrap_failed;
1015 	if (rqstp->rq_deferred) {
1016 		/* Already decrypted last time through! The sequence number
1017 		 * check at out_seq is unnecessary but harmless: */
1018 		goto out_seq;
1019 	}
1020 	if (len > xdr_stream_remaining(xdr))
1021 		goto unwrap_failed;
1022 	offset = xdr_stream_pos(xdr);
1023 
1024 	saved_len = buf->len;
1025 	maj_stat = gss_unwrap(ctx, offset, offset + len, buf);
1026 	if (maj_stat != GSS_S_COMPLETE)
1027 		goto bad_unwrap;
1028 	xdr->nwords -= XDR_QUADLEN(saved_len - buf->len);
1029 
1030 out_seq:
1031 	/* gss_unwrap() decrypted the sequence number. */
1032 	if (xdr_stream_decode_u32(xdr, &seq_num) < 0)
1033 		goto unwrap_failed;
1034 	if (seq_num != seq)
1035 		goto bad_seqno;
1036 	return 0;
1037 
1038 unwrap_failed:
1039 	trace_rpcgss_svc_unwrap_failed(rqstp);
1040 	return -EINVAL;
1041 bad_seqno:
1042 	trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num);
1043 	return -EINVAL;
1044 bad_unwrap:
1045 	trace_rpcgss_svc_unwrap(rqstp, maj_stat);
1046 	return -EINVAL;
1047 }
1048 
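/*
 * Record the auth_domain derived from the GSS mechanism and service in
 * rq_gssclient, then let svcauth_unix_set_client() choose rq_client
 * from the client's transport address as usual.
 */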
1049 static int
1050 svcauth_gss_set_client(struct svc_rqst *rqstp)
1051 {
1052 	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
1053 	struct rsc *rsci = svcdata->rsci;
1054 	struct rpc_gss_wire_cred *gc = &svcdata->clcred;
1055 	int stat;
1056 
1057 	rqstp->rq_auth_stat = rpc_autherr_badcred;
1058 
1059 	/*
1060 	 * A gss export can be specified either by:
1061 	 * 	export	*(sec=krb5,rw)
1062 	 * or by
1063 	 * 	export gss/krb5(rw)
1064 	 * The latter is deprecated; but for backwards compatibility reasons
1065 	 * the nfsd code will still fall back on trying it if the former
1066 	 * doesn't work; so we try to make both available to nfsd, below.
1067 	 */
1068 	rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
1069 	if (rqstp->rq_gssclient == NULL)
1070 		return SVC_DENIED;
1071 	stat = svcauth_unix_set_client(rqstp);
1072 	if (stat == SVC_DROP || stat == SVC_CLOSE)
1073 		return stat;
1074 
1075 	rqstp->rq_auth_stat = rpc_auth_ok;
1076 	return SVC_OK;
1077 }
1078 
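/*
 * Encode the Reply verifier for a context-initiation reply. If the
 * context was established, the verifier body is a MIC over the
 * seq_window value; otherwise an empty AUTH_NULL verifier is emitted.
 */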
1079 static bool
1080 svcauth_gss_proc_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
1081 			   struct xdr_netobj *out_handle, int *major_status,
1082 			   u32 seq_num)
1083 {
1084 	struct xdr_stream *xdr = &rqstp->rq_res_stream;
1085 	struct rsc *rsci;
1086 	bool rc;
1087 
1088 	if (*major_status != GSS_S_COMPLETE)
1089 		goto null_verifier;
1090 	rsci = gss_svc_searchbyctx(cd, out_handle);
1091 	if (rsci == NULL) {
1092 		*major_status = GSS_S_NO_CONTEXT;
1093 		goto null_verifier;
1094 	}
1095 
1096 	rc = svcauth_gss_encode_verf(rqstp, rsci->mechctx, seq_num);
1097 	cache_put(&rsci->h, cd);
1098 	return rc;
1099 
1100 null_verifier:
1101 	return xdr_stream_encode_opaque_auth(xdr, RPC_AUTH_NULL, NULL, 0) > 0;
1102 }
1103 
1104 static void gss_free_in_token_pages(struct gssp_in_token *in_token)
1105 {
1106 	u32 inlen;
1107 	int i;
1108 
1109 	i = 0;
1110 	inlen = in_token->page_len;
1111 	for (; inlen; i++) {	/* release one page per PAGE_SIZE of data */
1112 		if (in_token->pages[i])
1113 			put_page(in_token->pages[i]);
1114 		inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
1115 	}
1116 
1117 	kfree(in_token->pages);
1118 	in_token->pages = NULL;
1119 }
1120 
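/*
 * Pull the client's RPCSEC_GSS_INIT token out of the receive buffer and
 * copy it into a freshly allocated page array so it can be handed to
 * the gss-proxy daemon; the client-supplied handle is duplicated into
 * @in_handle.
 */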
1121 static int gss_read_proxy_verf(struct svc_rqst *rqstp,
1122 			       struct rpc_gss_wire_cred *gc,
1123 			       struct xdr_netobj *in_handle,
1124 			       struct gssp_in_token *in_token)
1125 {
1126 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1127 	unsigned int length, pgto_offs, pgfrom_offs;
1128 	int pages, i, pgto, pgfrom;
1129 	size_t to_offs, from_offs;
1130 	u32 inlen;
1131 
1132 	if (dup_netobj(in_handle, &gc->gc_ctx))
1133 		return SVC_CLOSE;
1134 
1135 	/*
1136 	 *  RFC 2203 Section 5.2.2
1137 	 *
1138 	 *	struct rpc_gss_init_arg {
1139 	 *		opaque gss_token<>;
1140 	 *	};
1141 	 */
1142 	if (xdr_stream_decode_u32(xdr, &inlen) < 0)
1143 		goto out_denied_free;
1144 	if (inlen > xdr_stream_remaining(xdr))
1145 		goto out_denied_free;
1146 
1147 	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
1148 	in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
1149 	if (!in_token->pages)
1150 		goto out_denied_free;
1151 	in_token->page_base = 0;
1152 	in_token->page_len = inlen;
1153 	for (i = 0; i < pages; i++) {
1154 		in_token->pages[i] = alloc_page(GFP_KERNEL);
1155 		if (!in_token->pages[i]) {
1156 			gss_free_in_token_pages(in_token);
1157 			goto out_denied_free;
1158 		}
1159 	}
1160 
1161 	length = min_t(unsigned int, inlen, (char *)xdr->end - (char *)xdr->p);
1162 	memcpy(page_address(in_token->pages[0]), xdr->p, length);
1163 	inlen -= length;
1164 
1165 	to_offs = length;
1166 	from_offs = rqstp->rq_arg.page_base;
1167 	while (inlen) {
1168 		pgto = to_offs >> PAGE_SHIFT;
1169 		pgfrom = from_offs >> PAGE_SHIFT;
1170 		pgto_offs = to_offs & ~PAGE_MASK;
1171 		pgfrom_offs = from_offs & ~PAGE_MASK;
1172 
1173 		length = min_t(unsigned int, inlen,
1174 			 min_t(unsigned int, PAGE_SIZE - pgto_offs,
1175 			       PAGE_SIZE - pgfrom_offs));
1176 		memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
1177 		       page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
1178 		       length);
1179 
1180 		to_offs += length;
1181 		from_offs += length;
1182 		inlen -= length;
1183 	}
1184 	return 0;
1185 
1186 out_denied_free:
1187 	kfree(in_handle->data);
1188 	return SVC_DENIED;
1189 }
1190 
1191 /*
1192  * RFC 2203, Section 5.2.3.1.
1193  *
1194  *	struct rpc_gss_init_res {
1195  *		opaque handle<>;
1196  *		unsigned int gss_major;
1197  *		unsigned int gss_minor;
1198  *		unsigned int seq_window;
1199  *		opaque gss_token<>;
1200  *	};
1201  */
1202 static bool
1203 svcxdr_encode_gss_init_res(struct xdr_stream *xdr,
1204 			   struct xdr_netobj *handle,
1205 			   struct xdr_netobj *gss_token,
1206 			   unsigned int major_status,
1207 			   unsigned int minor_status, u32 seq_num)
1208 {
1209 	if (xdr_stream_encode_opaque(xdr, handle->data, handle->len) < 0)
1210 		return false;
1211 	if (xdr_stream_encode_u32(xdr, major_status) < 0)
1212 		return false;
1213 	if (xdr_stream_encode_u32(xdr, minor_status) < 0)
1214 		return false;
1215 	if (xdr_stream_encode_u32(xdr, seq_num) < 0)
1216 		return false;
1217 	if (xdr_stream_encode_opaque(xdr, gss_token->data, gss_token->len) < 0)
1218 		return false;
1219 	return true;
1220 }
1221 
1222 /*
1223  * Having read the cred already and found we're in the context
1224  * initiation case, read the verifier and initiate (or check the results
1225  * of) upcalls to userspace for help with context initiation.  If
1226  * the upcall results are available, write the verifier and result.
1227  * Otherwise, drop the request pending an answer to the upcall.
1228  */
1229 static int
1230 svcauth_gss_legacy_init(struct svc_rqst *rqstp,
1231 			struct rpc_gss_wire_cred *gc)
1232 {
1233 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1234 	struct rsi *rsip, rsikey;
1235 	__be32 *p;
1236 	u32 len;
1237 	int ret;
1238 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
1239 
1240 	memset(&rsikey, 0, sizeof(rsikey));
1241 	if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
1242 		return SVC_CLOSE;
1243 
1244 	/*
1245 	 *  RFC 2203 Section 5.2.2
1246 	 *
1247 	 *	struct rpc_gss_init_arg {
1248 	 *		opaque gss_token<>;
1249 	 *	};
1250 	 */
1251 	if (xdr_stream_decode_u32(xdr, &len) < 0) {
1252 		kfree(rsikey.in_handle.data);
1253 		return SVC_DENIED;
1254 	}
1255 	p = xdr_inline_decode(xdr, len);
1256 	if (!p) {
1257 		kfree(rsikey.in_handle.data);
1258 		return SVC_DENIED;
1259 	}
1260 	rsikey.in_token.data = kmalloc(len, GFP_KERNEL);
1261 	if (ZERO_OR_NULL_PTR(rsikey.in_token.data)) {
1262 		kfree(rsikey.in_handle.data);
1263 		return SVC_CLOSE;
1264 	}
1265 	memcpy(rsikey.in_token.data, p, len);
1266 	rsikey.in_token.len = len;
1267 
1268 	/* Perform upcall, or find upcall result: */
1269 	rsip = rsi_lookup(sn->rsi_cache, &rsikey);
1270 	rsi_free(&rsikey);
1271 	if (!rsip)
1272 		return SVC_CLOSE;
1273 	if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
1274 		/* No upcall result: */
1275 		return SVC_CLOSE;
1276 
1277 	ret = SVC_CLOSE;
1278 	if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &rsip->out_handle,
1279 					&rsip->major_status, GSS_SEQ_WIN))
1280 		goto out;
1281 	if (xdr_stream_encode_u32(&rqstp->rq_res_stream, RPC_SUCCESS) < 0)
1282 		goto out;
1283 	if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &rsip->out_handle,
1284 					&rsip->out_token, rsip->major_status,
1285 					rsip->minor_status, GSS_SEQ_WIN))
1286 		goto out;
1287 
1288 	ret = SVC_COMPLETE;
1289 out:
1290 	cache_put(&rsip->h, sn->rsi_cache);
1291 	return ret;
1292 }
1293 
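/*
 * Stash a context accepted by gss-proxy in the rsc cache. The handle
 * returned to the client is simply the next value of a 64-bit counter;
 * the GSS security context and the creds are taken over from the
 * upcall result.
 */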
1294 static int gss_proxy_save_rsc(struct cache_detail *cd,
1295 				struct gssp_upcall_data *ud,
1296 				uint64_t *handle)
1297 {
1298 	struct rsc rsci, *rscp = NULL;
1299 	static atomic64_t ctxhctr;
1300 	long long ctxh;
1301 	struct gss_api_mech *gm = NULL;
1302 	time64_t expiry;
1303 	int status;
1304 
1305 	memset(&rsci, 0, sizeof(rsci));
1306 	/* context handle */
1307 	status = -ENOMEM;
1308 	/* the handle needs to be just a unique id,
1309 	 * use a static counter */
1310 	ctxh = atomic64_inc_return(&ctxhctr);
1311 
1312 	/* make a copy for the caller */
1313 	*handle = ctxh;
1314 
1315 	/* make a copy for the rsc cache */
1316 	if (dup_to_netobj(&rsci.handle, (char *)handle, sizeof(uint64_t)))
1317 		goto out;
1318 	rscp = rsc_lookup(cd, &rsci);
1319 	if (!rscp)
1320 		goto out;
1321 
1322 	/* creds */
1323 	if (!ud->found_creds) {
1324 		/* userspace seems buggy; we should always get at least a
1325 		 * mapping to nobody */
1326 		goto out;
1327 	} else {
1328 		struct timespec64 boot;
1329 
1330 		/* steal creds */
1331 		rsci.cred = ud->creds;
1332 		memset(&ud->creds, 0, sizeof(struct svc_cred));
1333 
1334 		status = -EOPNOTSUPP;
1335 		/* get mech handle from OID */
1336 		gm = gss_mech_get_by_OID(&ud->mech_oid);
1337 		if (!gm)
1338 			goto out;
1339 		rsci.cred.cr_gss_mech = gm;
1340 
1341 		status = -EINVAL;
1342 		/* mech-specific data: */
1343 		status = gss_import_sec_context(ud->out_handle.data,
1344 						ud->out_handle.len,
1345 						gm, &rsci.mechctx,
1346 						&expiry, GFP_KERNEL);
1347 		if (status)
1348 			goto out;
1349 
1350 		getboottime64(&boot);
1351 		expiry -= boot.tv_sec;
1352 	}
1353 
1354 	rsci.h.expiry_time = expiry;
1355 	rscp = rsc_update(cd, &rsci, rscp);
1356 	status = 0;
1357 out:
1358 	rsc_free(&rsci);
1359 	if (rscp)
1360 		cache_put(&rscp->h, cd);
1361 	else
1362 		status = -ENOMEM;
1363 	return status;
1364 }
1365 
1366 static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
1367 				  struct rpc_gss_wire_cred *gc)
1368 {
1369 	struct xdr_netobj cli_handle;
1370 	struct gssp_upcall_data ud;
1371 	uint64_t handle;
1372 	int status;
1373 	int ret;
1374 	struct net *net = SVC_NET(rqstp);
1375 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1376 
1377 	memset(&ud, 0, sizeof(ud));
1378 	ret = gss_read_proxy_verf(rqstp, gc, &ud.in_handle, &ud.in_token);
1379 	if (ret)
1380 		return ret;
1381 
1382 	ret = SVC_CLOSE;
1383 
1384 	/* Perform synchronous upcall to gss-proxy */
1385 	status = gssp_accept_sec_context_upcall(net, &ud);
1386 	if (status)
1387 		goto out;
1388 
1389 	trace_rpcgss_svc_accept_upcall(rqstp, ud.major_status, ud.minor_status);
1390 
1391 	switch (ud.major_status) {
1392 	case GSS_S_CONTINUE_NEEDED:
1393 		cli_handle = ud.out_handle;
1394 		break;
1395 	case GSS_S_COMPLETE:
1396 		status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
1397 		if (status)
1398 			goto out;
1399 		cli_handle.data = (u8 *)&handle;
1400 		cli_handle.len = sizeof(handle);
1401 		break;
1402 	default:
1403 		goto out;
1404 	}
1405 
1406 	if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &cli_handle,
1407 					&ud.major_status, GSS_SEQ_WIN))
1408 		goto out;
1409 	if (xdr_stream_encode_u32(&rqstp->rq_res_stream, RPC_SUCCESS) < 0)
1410 		goto out;
1411 	if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &cli_handle,
1412 					&ud.out_token, ud.major_status,
1413 					ud.minor_status, GSS_SEQ_WIN))
1414 		goto out;
1415 
1416 	ret = SVC_COMPLETE;
1417 out:
1418 	gss_free_in_token_pages(&ud.in_token);
1419 	gssp_free_upcall_data(&ud);
1420 	return ret;
1421 }
1422 
1423 /*
1424  * Try to set the sn->use_gss_proxy variable to a new value. We only allow
1425  * it to be changed if it's currently undefined (-1). If it's any other value
1426  * then return -EBUSY unless the type wouldn't have changed anyway.
1427  */
1428 static int set_gss_proxy(struct net *net, int type)
1429 {
1430 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1431 	int ret;
1432 
1433 	WARN_ON_ONCE(type != 0 && type != 1);
1434 	ret = cmpxchg(&sn->use_gss_proxy, -1, type);
1435 	if (ret != -1 && ret != type)
1436 		return -EBUSY;
1437 	return 0;
1438 }
1439 
1440 static bool use_gss_proxy(struct net *net)
1441 {
1442 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1443 
1444 	/* If use_gss_proxy is still undefined, then try to disable it */
1445 	if (sn->use_gss_proxy == -1)
1446 		set_gss_proxy(net, 0);
1447 	return sn->use_gss_proxy;
1448 }
1449 
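/*
 * Common handler for RPC_GSS_PROC_INIT and RPC_GSS_PROC_CONTINUE_INIT:
 * check the Call's AUTH_NULL verifier, then establish the context via
 * either the legacy rpc.svcgssd upcall or the gss-proxy upcall,
 * depending on the per-net use_gss_proxy setting.
 */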
1450 static noinline_for_stack int
1451 svcauth_gss_proc_init(struct svc_rqst *rqstp, struct rpc_gss_wire_cred *gc)
1452 {
1453 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1454 	u32 flavor, len;
1455 	void *body;
1456 
1457 	svcxdr_init_encode(rqstp);
1458 
1459 	/* Call's verf field: */
1460 	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
1461 		return SVC_GARBAGE;
1462 	if (flavor != RPC_AUTH_NULL || len != 0) {
1463 		rqstp->rq_auth_stat = rpc_autherr_badverf;
1464 		return SVC_DENIED;
1465 	}
1466 
1467 	if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0) {
1468 		rqstp->rq_auth_stat = rpc_autherr_badcred;
1469 		return SVC_DENIED;
1470 	}
1471 
1472 	if (!use_gss_proxy(SVC_NET(rqstp)))
1473 		return svcauth_gss_legacy_init(rqstp, gc);
1474 	return svcauth_gss_proxy_init(rqstp, gc);
1475 }
1476 
1477 #ifdef CONFIG_PROC_FS
1478 
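/*
 * Writing "1" to the use-gss-proxy file (under /proc/net/rpc) switches
 * this network namespace to the gss-proxy upcall; once a mode has been
 * chosen it cannot be changed (see set_gss_proxy()).
 */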
1479 static ssize_t write_gssp(struct file *file, const char __user *buf,
1480 			 size_t count, loff_t *ppos)
1481 {
1482 	struct net *net = pde_data(file_inode(file));
1483 	char tbuf[20];
1484 	unsigned long i;
1485 	int res;
1486 
1487 	if (*ppos || count > sizeof(tbuf)-1)
1488 		return -EINVAL;
1489 	if (copy_from_user(tbuf, buf, count))
1490 		return -EFAULT;
1491 
1492 	tbuf[count] = 0;
1493 	res = kstrtoul(tbuf, 0, &i);
1494 	if (res)
1495 		return res;
1496 	if (i != 1)
1497 		return -EINVAL;
1498 	res = set_gssp_clnt(net);
1499 	if (res)
1500 		return res;
1501 	res = set_gss_proxy(net, 1);
1502 	if (res)
1503 		return res;
1504 	return count;
1505 }
1506 
1507 static ssize_t read_gssp(struct file *file, char __user *buf,
1508 			 size_t count, loff_t *ppos)
1509 {
1510 	struct net *net = pde_data(file_inode(file));
1511 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1512 	unsigned long p = *ppos;
1513 	char tbuf[10];
1514 	size_t len;
1515 
1516 	snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
1517 	len = strlen(tbuf);
1518 	if (p >= len)
1519 		return 0;
1520 	len -= p;
1521 	if (len > count)
1522 		len = count;
1523 	if (copy_to_user(buf, (void *)(tbuf+p), len))
1524 		return -EFAULT;
1525 	*ppos += len;
1526 	return len;
1527 }
1528 
1529 static const struct proc_ops use_gss_proxy_proc_ops = {
1530 	.proc_open	= nonseekable_open,
1531 	.proc_write	= write_gssp,
1532 	.proc_read	= read_gssp,
1533 };
1534 
1535 static int create_use_gss_proxy_proc_entry(struct net *net)
1536 {
1537 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1538 	struct proc_dir_entry **p = &sn->use_gssp_proc;
1539 
1540 	sn->use_gss_proxy = -1;
1541 	*p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
1542 			      sn->proc_net_rpc,
1543 			      &use_gss_proxy_proc_ops, net);
1544 	if (!*p)
1545 		return -ENOMEM;
1546 	init_gssp_clnt(sn);
1547 	return 0;
1548 }
1549 
1550 static void destroy_use_gss_proxy_proc_entry(struct net *net)
1551 {
1552 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1553 
1554 	if (sn->use_gssp_proc) {
1555 		remove_proc_entry("use-gss-proxy", sn->proc_net_rpc);
1556 		clear_gssp_clnt(sn);
1557 	}
1558 }
1559 #else /* CONFIG_PROC_FS */
1560 
1561 static int create_use_gss_proxy_proc_entry(struct net *net)
1562 {
1563 	return 0;
1564 }
1565 
1566 static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
1567 
1568 #endif /* CONFIG_PROC_FS */
1569 
1570 /*
1571  * The Call's credential body should contain a struct rpc_gss_cred_t.
1572  *
1573  * RFC 2203 Section 5
1574  *
1575  *	struct rpc_gss_cred_t {
1576  *		union switch (unsigned int version) {
1577  *		case RPCSEC_GSS_VERS_1:
1578  *			struct {
1579  *				rpc_gss_proc_t gss_proc;
1580  *				unsigned int seq_num;
1581  *				rpc_gss_service_t service;
1582  *				opaque handle<>;
1583  *			} rpc_gss_cred_vers_1_t;
1584  *		}
1585  *	};
1586  */
1587 static bool
1588 svcauth_gss_decode_credbody(struct xdr_stream *xdr,
1589 			    struct rpc_gss_wire_cred *gc,
1590 			    __be32 **rpcstart)
1591 {
1592 	ssize_t handle_len;
1593 	u32 body_len;
1594 	__be32 *p;
1595 
1596 	p = xdr_inline_decode(xdr, XDR_UNIT);
1597 	if (!p)
1598 		return false;
1599 	/*
1600 	 * start of rpc packet is 7 u32's back from here:
1601 	 * xid direction rpcversion prog vers proc flavour
1602 	 */
1603 	*rpcstart = p - 7;
1604 	body_len = be32_to_cpup(p);
1605 	if (body_len > RPC_MAX_AUTH_SIZE)
1606 		return false;
1607 
1608 	/* struct rpc_gss_cred_t */
1609 	if (xdr_stream_decode_u32(xdr, &gc->gc_v) < 0)
1610 		return false;
1611 	if (xdr_stream_decode_u32(xdr, &gc->gc_proc) < 0)
1612 		return false;
1613 	if (xdr_stream_decode_u32(xdr, &gc->gc_seq) < 0)
1614 		return false;
1615 	if (xdr_stream_decode_u32(xdr, &gc->gc_svc) < 0)
1616 		return false;
1617 	handle_len = xdr_stream_decode_opaque_inline(xdr,
1618 						     (void **)&gc->gc_ctx.data,
1619 						     body_len);
1620 	if (handle_len < 0)
1621 		return false;
1622 	if (body_len != XDR_UNIT * 5 + xdr_align_size(handle_len))
1623 		return false;
1624 
1625 	gc->gc_ctx.len = handle_len;
1626 	return true;
1627 }
1628 
1629 /**
1630  * svcauth_gss_accept - Decode and validate incoming RPC_AUTH_GSS credential
1631  * @rqstp: RPC transaction
1632  *
1633  * Return values:
1634  *   %SVC_OK: Success
1635  *   %SVC_COMPLETE: GSS context lifetime event
1636  *   %SVC_DENIED: Credential or verifier is not valid
1637  *   %SVC_GARBAGE: Failed to decode credential or verifier
1638  *   %SVC_CLOSE: Temporary failure
1639  *
1640  * The rqstp->rq_auth_stat field is also set (see RFCs 2203 and 5531).
1641  */
1642 static int
1643 svcauth_gss_accept(struct svc_rqst *rqstp)
1644 {
1645 	struct kvec	*resv = &rqstp->rq_res.head[0];
1646 	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
1647 	__be32		*rpcstart;
1648 	struct rpc_gss_wire_cred *gc;
1649 	struct rsc	*rsci = NULL;
1650 	__be32		*reject_stat = resv->iov_base + resv->iov_len;
1651 	int		ret;
1652 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
1653 
1654 	rqstp->rq_auth_stat = rpc_autherr_badcred;
1655 	if (!svcdata)
1656 		svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
1657 	if (!svcdata)
1658 		goto auth_err;
1659 	rqstp->rq_auth_data = svcdata;
1660 	svcdata->verf_start = NULL;
1661 	svcdata->rsci = NULL;
1662 	gc = &svcdata->clcred;
1663 
1664 	if (!svcauth_gss_decode_credbody(&rqstp->rq_arg_stream, gc, &rpcstart))
1665 		goto auth_err;
1666 	if (gc->gc_v != RPC_GSS_VERSION)
1667 		goto auth_err;
1668 
1669 	switch (gc->gc_proc) {
1670 	case RPC_GSS_PROC_INIT:
1671 	case RPC_GSS_PROC_CONTINUE_INIT:
1672 		if (rqstp->rq_proc != 0)
1673 			goto auth_err;
1674 		return svcauth_gss_proc_init(rqstp, gc);
1675 	case RPC_GSS_PROC_DESTROY:
1676 		if (rqstp->rq_proc != 0)
1677 			goto auth_err;
1678 		fallthrough;
1679 	case RPC_GSS_PROC_DATA:
1680 		rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
1681 		rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
1682 		if (!rsci)
1683 			goto auth_err;
1684 		switch (svcauth_gss_verify_header(rqstp, rsci, rpcstart, gc)) {
1685 		case SVC_OK:
1686 			break;
1687 		case SVC_DENIED:
1688 			goto auth_err;
1689 		case SVC_DROP:
1690 			goto drop;
1691 		}
1692 		break;
1693 	default:
1694 		if (rqstp->rq_proc != 0)
1695 			goto auth_err;
1696 		rqstp->rq_auth_stat = rpc_autherr_rejectedcred;
1697 		goto auth_err;
1698 	}
1699 
1700 	/* now act upon the command: */
1701 	switch (gc->gc_proc) {
1702 	case RPC_GSS_PROC_DESTROY:
1703 		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
1704 			goto auth_err;
1705 		/* Delete the entry from the cache_list and call cache_put */
1706 		sunrpc_cache_unhash(sn->rsc_cache, &rsci->h);
1707 		if (resv->iov_len + 4 > PAGE_SIZE)
1708 			goto drop;
1709 		svc_putnl(resv, RPC_SUCCESS);
1710 		goto complete;
1711 	case RPC_GSS_PROC_DATA:
1712 		rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
1713 		svcdata->verf_start = resv->iov_base + resv->iov_len;
1714 		if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
1715 			goto auth_err;
1716 		rqstp->rq_cred = rsci->cred;
1717 		get_group_info(rsci->cred.cr_group_info);
1718 		rqstp->rq_auth_stat = rpc_autherr_badcred;
1719 		switch (gc->gc_svc) {
1720 		case RPC_GSS_SVC_NONE:
1721 			svcxdr_init_encode(rqstp);
1722 			break;
1723 		case RPC_GSS_SVC_INTEGRITY:
1724 			svcxdr_init_encode(rqstp);
1725 			/* placeholders for body length and seq. number: */
1726 			xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
1727 			if (svcauth_gss_unwrap_integ(rqstp, gc->gc_seq,
1728 						     rsci->mechctx))
1729 				goto garbage_args;
1730 			svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE);
1731 			break;
1732 		case RPC_GSS_SVC_PRIVACY:
1733 			svcxdr_init_encode(rqstp);
1734 			/* placeholders for body length and seq. number: */
1735 			xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
1736 			if (svcauth_gss_unwrap_priv(rqstp, gc->gc_seq,
1737 						    rsci->mechctx))
1738 				goto garbage_args;
1739 			svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE * 2);
1740 			break;
1741 		default:
1742 			goto auth_err;
1743 		}
1744 		svcdata->rsci = rsci;
1745 		cache_get(&rsci->h);
1746 		rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
1747 					rsci->mechctx->mech_type,
1748 					GSS_C_QOP_DEFAULT,
1749 					gc->gc_svc);
1750 		ret = SVC_OK;
1751 		trace_rpcgss_svc_authenticate(rqstp, gc);
1752 		goto out;
1753 	}
1754 garbage_args:
1755 	ret = SVC_GARBAGE;
1756 	goto out;
1757 auth_err:
1758 	/* Restore write pointer to its original value: */
1759 	xdr_ressize_check(rqstp, reject_stat);
1760 	ret = SVC_DENIED;
1761 	goto out;
1762 complete:
1763 	ret = SVC_COMPLETE;
1764 	goto out;
1765 drop:
1766 	ret = SVC_CLOSE;
1767 out:
1768 	if (rsci)
1769 		cache_put(&rsci->h, sn->rsc_cache);
1770 	return ret;
1771 }
1772 
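/*
 * Locate the point in the Reply buffer where wrapping should begin:
 * skip the verifier written earlier, move accept_stat into place, and
 * return a pointer just past it, or NULL if this Reply should not be
 * wrapped (auth error or non-zero accept_stat).
 */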
1773 static __be32 *
1774 svcauth_gss_prepare_to_wrap(struct svc_rqst *rqstp, struct gss_svc_data *gsd)
1775 {
1776 	struct xdr_buf *resbuf = &rqstp->rq_res;
1777 	__be32 *p;
1778 	u32 verf_len;
1779 
1780 	p = gsd->verf_start;
1781 	gsd->verf_start = NULL;
1782 
1783 	/* AUTH_ERROR replies are not wrapped. */
1784 	if (rqstp->rq_auth_stat != rpc_auth_ok)
1785 		return NULL;
1786 
1787 	/* Skip the verifier: */
1788 	p += 1;
1789 	verf_len = ntohl(*p++);
1790 	p += XDR_QUADLEN(verf_len);
1791 	/* move accept_stat to right place: */
1792 	memcpy(p, p + 2, 4);
1793 	/* Also don't wrap if the accept stat is nonzero: */
1794 	if (*p != rpc_success) {
1795 		resbuf->head[0].iov_len -= 2 * 4;
1796 		return NULL;
1797 	}
1798 	p++;
1799 	return p;
1800 }
1801 
1802 /*
1803  * RFC 2203, Section 5.3.2.2
1804  *
1805  *	struct rpc_gss_integ_data {
1806  *		opaque databody_integ<>;
1807  *		opaque checksum<>;
1808  *	};
1809  *
1810  *	struct rpc_gss_data_t {
1811  *		unsigned int seq_num;
1812  *		proc_req_arg_t arg;
1813  *	};
1814  *
1815  * The RPC Reply message has already been XDR-encoded. rq_res_stream
1816  * is now positioned so that the checksum can be written just past
1817  * the RPC Reply message.
1818  */
1819 static int svcauth_gss_wrap_integ(struct svc_rqst *rqstp)
1820 {
1821 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
1822 	struct xdr_stream *xdr = &rqstp->rq_res_stream;
1823 	struct rpc_gss_wire_cred *gc = &gsd->clcred;
1824 	struct xdr_buf *buf = xdr->buf;
1825 	struct xdr_buf databody_integ;
1826 	struct xdr_netobj checksum;
1827 	u32 offset, len, maj_stat;
1828 	__be32 *p;
1829 
1830 	p = svcauth_gss_prepare_to_wrap(rqstp, gsd);
1831 	if (p == NULL)
1832 		goto out;
1833 
1834 	offset = (u8 *)(p + 1) - (u8 *)buf->head[0].iov_base;
1835 	len = buf->len - offset;
1836 	if (xdr_buf_subsegment(buf, &databody_integ, offset, len))
1837 		goto wrap_failed;
1838 	/* Buffer space for these has already been reserved in
1839 	 * svcauth_gss_accept(). */
1840 	*p++ = cpu_to_be32(len);
1841 	*p = cpu_to_be32(gc->gc_seq);
1842 
1843 	checksum.data = gsd->gsd_scratch;
1844 	maj_stat = gss_get_mic(gsd->rsci->mechctx, &databody_integ, &checksum);
1845 	if (maj_stat != GSS_S_COMPLETE)
1846 		goto bad_mic;
1847 
1848 	if (xdr_stream_encode_opaque(xdr, checksum.data, checksum.len) < 0)
1849 		goto wrap_failed;
1850 	xdr_commit_encode(xdr);
1851 
1852 out:
1853 	return 0;
1854 
1855 bad_mic:
1856 	trace_rpcgss_svc_get_mic(rqstp, maj_stat);
1857 	return -EINVAL;
1858 wrap_failed:
1859 	trace_rpcgss_svc_wrap_failed(rqstp);
1860 	return -EINVAL;
1861 }
1862 
1863 /*
1864  * RFC 2203, Section 5.3.2.3
1865  *
1866  *	struct rpc_gss_priv_data {
1867  *		opaque databody_priv<>
1868  *	};
1869  *
1870  *	struct rpc_gss_data_t {
1871  *		unsigned int seq_num;
1872  *		proc_req_arg_t arg;
1873  *	};
1874  *
1875  * gss_wrap() expands the size of the RPC message payload in the
1876  * response buffer. The main purpose of svcauth_gss_wrap_priv()
1877  * is to ensure there is adequate space in the response buffer to
1878  * avoid overflow during the wrap.
1879  */
1880 static int svcauth_gss_wrap_priv(struct svc_rqst *rqstp)
1881 {
1882 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
1883 	struct rpc_gss_wire_cred *gc = &gsd->clcred;
1884 	struct xdr_buf *buf = &rqstp->rq_res;
1885 	struct kvec *head = buf->head;
1886 	struct kvec *tail = buf->tail;
1887 	u32 offset, pad, maj_stat;
1888 	__be32 *p, *lenp;
1889 
1890 	p = svcauth_gss_prepare_to_wrap(rqstp, gsd);
1891 	if (p == NULL)
1892 		return 0;
1893 
1894 	lenp = p++;
1895 	offset = (u8 *)p - (u8 *)head->iov_base;
1896 	/* Buffer space for this field has already been reserved
1897 	 * in svcauth_gss_accept(). */
1898 	*p = cpu_to_be32(gc->gc_seq);
1899 
1900 	/*
1901 	 * If there is currently tail data, make sure there is
1902 	 * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
1903 	 * the page, and move the current tail data such that
1904 	 * there is RPC_MAX_AUTH_SIZE slack space available in
1905 	 * both the head and tail.
1906 	 */
1907 	if (tail->iov_base) {
1908 		if (tail->iov_base >= head->iov_base + PAGE_SIZE)
1909 			goto wrap_failed;
1910 		if (tail->iov_base < head->iov_base)
1911 			goto wrap_failed;
1912 		if (tail->iov_len + head->iov_len
1913 				+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1914 			goto wrap_failed;
1915 		memmove(tail->iov_base + RPC_MAX_AUTH_SIZE, tail->iov_base,
1916 			tail->iov_len);
1917 		tail->iov_base += RPC_MAX_AUTH_SIZE;
1918 	}
1919 	/*
1920 	 * If there is no current tail data, make sure there is
1921 	 * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
1922 	 * allotted page, and set up tail information such that there
1923 	 * is RPC_MAX_AUTH_SIZE slack space available in both the
1924 	 * head and tail.
1925 	 */
1926 	if (!tail->iov_base) {
1927 		if (head->iov_len + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1928 			goto wrap_failed;
1929 		tail->iov_base = head->iov_base
1930 			+ head->iov_len + RPC_MAX_AUTH_SIZE;
1931 		tail->iov_len = 0;
1932 	}
1933 
1934 	maj_stat = gss_wrap(gsd->rsci->mechctx, offset, buf, buf->pages);
1935 	if (maj_stat != GSS_S_COMPLETE)
1936 		goto bad_wrap;
1937 
1938 	*lenp = cpu_to_be32(buf->len - offset);
1939 	pad = xdr_pad_size(buf->len - offset);
1940 	p = (__be32 *)(tail->iov_base + tail->iov_len);
1941 	memset(p, 0, pad);
1942 	tail->iov_len += pad;
1943 	buf->len += pad;
1944 
1945 	return 0;
1946 wrap_failed:
1947 	trace_rpcgss_svc_wrap_failed(rqstp);
1948 	return -EINVAL;
1949 bad_wrap:
1950 	trace_rpcgss_svc_wrap(rqstp, maj_stat);
1951 	return -ENOMEM;
1952 }
1953 
1954 /**
1955  * svcauth_gss_release - Wrap payload and release resources
1956  * @rqstp: RPC transaction context
1957  *
1958  * Return values:
1959  *    %0: the Reply is ready to be sent
1960  *    %-ENOMEM: failed to allocate memory
1961  *    %-EINVAL: encoding error
1962  *
1963  * XXX: These return values do not match the return values documented
1964  *      for the auth_ops ->release method in linux/sunrpc/svcauth.h.
1965  */
1966 static int
1967 svcauth_gss_release(struct svc_rqst *rqstp)
1968 {
1969 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
1970 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
1971 	struct rpc_gss_wire_cred *gc;
1972 	int stat;
1973 
1974 	if (!gsd)
1975 		goto out;
1976 	gc = &gsd->clcred;
1977 	if (gc->gc_proc != RPC_GSS_PROC_DATA)
1978 		goto out;
1979 	/* Release can be called twice, but we only wrap once. */
1980 	if (gsd->verf_start == NULL)
1981 		goto out;
1982 
1983 	switch (gc->gc_svc) {
1984 	case RPC_GSS_SVC_NONE:
1985 		break;
1986 	case RPC_GSS_SVC_INTEGRITY:
1987 		stat = svcauth_gss_wrap_integ(rqstp);
1988 		if (stat)
1989 			goto out_err;
1990 		break;
1991 	case RPC_GSS_SVC_PRIVACY:
1992 		stat = svcauth_gss_wrap_priv(rqstp);
1993 		if (stat)
1994 			goto out_err;
1995 		break;
1996 	/*
1997 	 * For any other gc_svc value, svcauth_gss_accept() already set
1998 	 * the auth_error appropriately; just fall through:
1999 	 */
2000 	}
2001 
2002 out:
2003 	stat = 0;
2004 out_err:
2005 	if (rqstp->rq_client)
2006 		auth_domain_put(rqstp->rq_client);
2007 	rqstp->rq_client = NULL;
2008 	if (rqstp->rq_gssclient)
2009 		auth_domain_put(rqstp->rq_gssclient);
2010 	rqstp->rq_gssclient = NULL;
2011 	if (rqstp->rq_cred.cr_group_info)
2012 		put_group_info(rqstp->rq_cred.cr_group_info);
2013 	rqstp->rq_cred.cr_group_info = NULL;
2014 	if (gsd && gsd->rsci) {
2015 		cache_put(&gsd->rsci->h, sn->rsc_cache);
2016 		gsd->rsci = NULL;
2017 	}
2018 	return stat;
2019 }
2020 
2021 static void
2022 svcauth_gss_domain_release_rcu(struct rcu_head *head)
2023 {
2024 	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
2025 	struct gss_domain *gd = container_of(dom, struct gss_domain, h);
2026 
2027 	kfree(dom->name);
2028 	kfree(gd);
2029 }
2030 
2031 static void
2032 svcauth_gss_domain_release(struct auth_domain *dom)
2033 {
2034 	call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
2035 }
2036 
2037 static struct auth_ops svcauthops_gss = {
2038 	.name		= "rpcsec_gss",
2039 	.owner		= THIS_MODULE,
2040 	.flavour	= RPC_AUTH_GSS,
2041 	.accept		= svcauth_gss_accept,
2042 	.release	= svcauth_gss_release,
2043 	.domain_release = svcauth_gss_domain_release,
2044 	.set_client	= svcauth_gss_set_client,
2045 };
2046 
2047 static int rsi_cache_create_net(struct net *net)
2048 {
2049 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2050 	struct cache_detail *cd;
2051 	int err;
2052 
2053 	cd = cache_create_net(&rsi_cache_template, net);
2054 	if (IS_ERR(cd))
2055 		return PTR_ERR(cd);
2056 	err = cache_register_net(cd, net);
2057 	if (err) {
2058 		cache_destroy_net(cd, net);
2059 		return err;
2060 	}
2061 	sn->rsi_cache = cd;
2062 	return 0;
2063 }
2064 
2065 static void rsi_cache_destroy_net(struct net *net)
2066 {
2067 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2068 	struct cache_detail *cd = sn->rsi_cache;
2069 
2070 	sn->rsi_cache = NULL;
2071 	cache_purge(cd);
2072 	cache_unregister_net(cd, net);
2073 	cache_destroy_net(cd, net);
2074 }
2075 
2076 static int rsc_cache_create_net(struct net *net)
2077 {
2078 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2079 	struct cache_detail *cd;
2080 	int err;
2081 
2082 	cd = cache_create_net(&rsc_cache_template, net);
2083 	if (IS_ERR(cd))
2084 		return PTR_ERR(cd);
2085 	err = cache_register_net(cd, net);
2086 	if (err) {
2087 		cache_destroy_net(cd, net);
2088 		return err;
2089 	}
2090 	sn->rsc_cache = cd;
2091 	return 0;
2092 }
2093 
2094 static void rsc_cache_destroy_net(struct net *net)
2095 {
2096 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2097 	struct cache_detail *cd = sn->rsc_cache;
2098 
2099 	sn->rsc_cache = NULL;
2100 	cache_purge(cd);
2101 	cache_unregister_net(cd, net);
2102 	cache_destroy_net(cd, net);
2103 }
2104 
2105 int
2106 gss_svc_init_net(struct net *net)
2107 {
2108 	int rv;
2109 
2110 	rv = rsc_cache_create_net(net);
2111 	if (rv)
2112 		return rv;
2113 	rv = rsi_cache_create_net(net);
2114 	if (rv)
2115 		goto out1;
2116 	rv = create_use_gss_proxy_proc_entry(net);
2117 	if (rv)
2118 		goto out2;
2119 	return 0;
2120 out2:
2121 	rsi_cache_destroy_net(net);
2122 out1:
2123 	rsc_cache_destroy_net(net);
2124 	return rv;
2125 }
2126 
2127 void
2128 gss_svc_shutdown_net(struct net *net)
2129 {
2130 	destroy_use_gss_proxy_proc_entry(net);
2131 	rsi_cache_destroy_net(net);
2132 	rsc_cache_destroy_net(net);
2133 }
2134 
2135 int
2136 gss_svc_init(void)
2137 {
2138 	return svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
2139 }
2140 
2141 void
2142 gss_svc_shutdown(void)
2143 {
2144 	svc_auth_unregister(RPC_AUTH_GSS);
2145 }
2146