1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Neil Brown <neilb@cse.unsw.edu.au>
4  * J. Bruce Fields <bfields@umich.edu>
5  * Andy Adamson <andros@umich.edu>
6  * Dug Song <dugsong@monkey.org>
7  *
8  * RPCSEC_GSS server authentication.
 * This implements RPCSEC_GSS as defined in RFC 2203 (rpcsec_gss) and
 * RFC 2078 (gssapi).
11  *
 * RPCSEC_GSS involves three stages:
13  *  1/ context creation
14  *  2/ data exchange
15  *  3/ context destruction
16  *
 * Context creation is handled largely by upcalls to user-space.
 *  In particular, GSS_Accept_sec_context is handled by an upcall.
 * Data exchange is handled entirely within the kernel.
 *  In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, and GSS_Unseal are in-kernel.
 * Context destruction is handled in-kernel.
 *  GSS_Delete_sec_context is in-kernel.
23  *
 * Context creation is initiated by the arrival of an RPCSEC_GSS_INIT request.
25  * The context handle and gss_token are used as a key into the rpcsec_init cache.
26  * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
 * namely major_status, minor_status, context_handle, and reply_token.
28  * These are sent back to the client.
 * Sequence window management is handled by the kernel.  The window size is
 * currently a compile-time constant.
31  *
32  * When user-space is happy that a context is established, it places an entry
33  * in the rpcsec_context cache. The key for this cache is the context_handle.
34  * The content includes:
35  *   uid/gidlist - for determining access rights
36  *   mechanism type
37  *   mechanism specific information, such as a key
38  *
39  */
40 
41 #include <linux/slab.h>
42 #include <linux/types.h>
43 #include <linux/module.h>
44 #include <linux/pagemap.h>
45 #include <linux/user_namespace.h>
46 
47 #include <linux/sunrpc/auth_gss.h>
48 #include <linux/sunrpc/gss_err.h>
49 #include <linux/sunrpc/svcauth.h>
50 #include <linux/sunrpc/svcauth_gss.h>
51 #include <linux/sunrpc/cache.h>
52 #include <linux/sunrpc/gss_krb5.h>
53 
54 #include <trace/events/rpcgss.h>
55 
56 #include "gss_rpc_upcall.h"
57 
58 /*
59  * Unfortunately there isn't a maximum checksum size exported via the
60  * GSS API. Manufacture one based on GSS mechanisms supported by this
61  * implementation.
62  */
63 #define GSS_MAX_CKSUMSIZE (GSS_KRB5_TOK_HDR_LEN + GSS_KRB5_MAX_CKSUM_LEN)
64 
65 /*
66  * This value may be increased in the future to accommodate other
67  * usage of the scratch buffer.
68  */
69 #define GSS_SCRATCH_SIZE GSS_MAX_CKSUMSIZE
70 
71 struct gss_svc_data {
72 	/* decoded gss client cred: */
73 	struct rpc_gss_wire_cred	clcred;
74 	u32				gsd_databody_offset;
75 	struct rsc			*rsci;
76 
77 	/* for temporary results */
78 	__be32				gsd_seq_num;
79 	u8				gsd_scratch[GSS_SCRATCH_SIZE];
80 };
81 
82 /* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
83  * into replies.
84  *
85  * Key is context handle (\x if empty) and gss_token.
 * Content is major_status, minor_status (integers), context_handle, and reply_token.
87  *
88  */
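
/*
 * Illustrative sketch of the resulting cache traffic (derived from
 * rsi_request() and rsi_parse() below; the concrete values are
 * hypothetical):
 *
 *   upcall to user space:
 *     <hex in_handle> <hex in_token>\n
 *   downcall from user space:
 *     <in_handle> <in_token> <expiry> <major> <minor> <out_handle> <out_token>\n
 *
 * Fields are space-separated qwords; an empty handle appears as "\x",
 * as noted above.
 */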
89 
90 static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
91 {
92 	return a->len == b->len && 0 == memcmp(a->data, b->data, a->len);
93 }
94 
95 #define	RSI_HASHBITS	6
96 #define	RSI_HASHMAX	(1<<RSI_HASHBITS)
97 
98 struct rsi {
99 	struct cache_head	h;
100 	struct xdr_netobj	in_handle, in_token;
101 	struct xdr_netobj	out_handle, out_token;
102 	int			major_status, minor_status;
103 	struct rcu_head		rcu_head;
104 };
105 
106 static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
107 static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item);
108 
109 static void rsi_free(struct rsi *rsii)
110 {
111 	kfree(rsii->in_handle.data);
112 	kfree(rsii->in_token.data);
113 	kfree(rsii->out_handle.data);
114 	kfree(rsii->out_token.data);
115 }
116 
117 static void rsi_free_rcu(struct rcu_head *head)
118 {
119 	struct rsi *rsii = container_of(head, struct rsi, rcu_head);
120 
121 	rsi_free(rsii);
122 	kfree(rsii);
123 }
124 
125 static void rsi_put(struct kref *ref)
126 {
127 	struct rsi *rsii = container_of(ref, struct rsi, h.ref);
128 
129 	call_rcu(&rsii->rcu_head, rsi_free_rcu);
130 }
131 
132 static inline int rsi_hash(struct rsi *item)
133 {
134 	return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
135 	     ^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
136 }
137 
138 static int rsi_match(struct cache_head *a, struct cache_head *b)
139 {
140 	struct rsi *item = container_of(a, struct rsi, h);
141 	struct rsi *tmp = container_of(b, struct rsi, h);
142 	return netobj_equal(&item->in_handle, &tmp->in_handle) &&
143 	       netobj_equal(&item->in_token, &tmp->in_token);
144 }
145 
146 static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
147 {
148 	dst->len = len;
149 	dst->data = (len ? kmemdup(src, len, GFP_KERNEL) : NULL);
150 	if (len && !dst->data)
151 		return -ENOMEM;
152 	return 0;
153 }
154 
155 static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
156 {
157 	return dup_to_netobj(dst, src->data, src->len);
158 }
159 
160 static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
161 {
162 	struct rsi *new = container_of(cnew, struct rsi, h);
163 	struct rsi *item = container_of(citem, struct rsi, h);
164 
165 	new->out_handle.data = NULL;
166 	new->out_handle.len = 0;
167 	new->out_token.data = NULL;
168 	new->out_token.len = 0;
169 	new->in_handle.len = item->in_handle.len;
170 	item->in_handle.len = 0;
171 	new->in_token.len = item->in_token.len;
172 	item->in_token.len = 0;
173 	new->in_handle.data = item->in_handle.data;
174 	item->in_handle.data = NULL;
175 	new->in_token.data = item->in_token.data;
176 	item->in_token.data = NULL;
177 }
178 
179 static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
180 {
181 	struct rsi *new = container_of(cnew, struct rsi, h);
182 	struct rsi *item = container_of(citem, struct rsi, h);
183 
184 	BUG_ON(new->out_handle.data || new->out_token.data);
185 	new->out_handle.len = item->out_handle.len;
186 	item->out_handle.len = 0;
187 	new->out_token.len = item->out_token.len;
188 	item->out_token.len = 0;
189 	new->out_handle.data = item->out_handle.data;
190 	item->out_handle.data = NULL;
191 	new->out_token.data = item->out_token.data;
192 	item->out_token.data = NULL;
193 
194 	new->major_status = item->major_status;
195 	new->minor_status = item->minor_status;
196 }
197 
198 static struct cache_head *rsi_alloc(void)
199 {
200 	struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);
201 	if (rsii)
202 		return &rsii->h;
203 	else
204 		return NULL;
205 }
206 
207 static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
208 {
209 	return sunrpc_cache_pipe_upcall_timeout(cd, h);
210 }
211 
212 static void rsi_request(struct cache_detail *cd,
213 		       struct cache_head *h,
214 		       char **bpp, int *blen)
215 {
216 	struct rsi *rsii = container_of(h, struct rsi, h);
217 
218 	qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
219 	qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
220 	(*bpp)[-1] = '\n';
221 	WARN_ONCE(*blen < 0,
222 		  "RPCSEC/GSS credential too large - please use gssproxy\n");
223 }
224 
225 static int rsi_parse(struct cache_detail *cd,
226 		    char *mesg, int mlen)
227 {
	/* in_handle in_token expiry major_status minor_status out_handle out_token */
229 	char *buf = mesg;
230 	char *ep;
231 	int len;
232 	struct rsi rsii, *rsip = NULL;
233 	time64_t expiry;
234 	int status = -EINVAL;
235 
236 	memset(&rsii, 0, sizeof(rsii));
237 	/* handle */
238 	len = qword_get(&mesg, buf, mlen);
239 	if (len < 0)
240 		goto out;
241 	status = -ENOMEM;
242 	if (dup_to_netobj(&rsii.in_handle, buf, len))
243 		goto out;
244 
245 	/* token */
246 	len = qword_get(&mesg, buf, mlen);
247 	status = -EINVAL;
248 	if (len < 0)
249 		goto out;
250 	status = -ENOMEM;
251 	if (dup_to_netobj(&rsii.in_token, buf, len))
252 		goto out;
253 
254 	rsip = rsi_lookup(cd, &rsii);
255 	if (!rsip)
256 		goto out;
257 
258 	rsii.h.flags = 0;
259 	/* expiry */
260 	status = get_expiry(&mesg, &expiry);
261 	if (status)
262 		goto out;
263 
264 	status = -EINVAL;
265 	/* major/minor */
266 	len = qword_get(&mesg, buf, mlen);
267 	if (len <= 0)
268 		goto out;
269 	rsii.major_status = simple_strtoul(buf, &ep, 10);
270 	if (*ep)
271 		goto out;
272 	len = qword_get(&mesg, buf, mlen);
273 	if (len <= 0)
274 		goto out;
275 	rsii.minor_status = simple_strtoul(buf, &ep, 10);
276 	if (*ep)
277 		goto out;
278 
279 	/* out_handle */
280 	len = qword_get(&mesg, buf, mlen);
281 	if (len < 0)
282 		goto out;
283 	status = -ENOMEM;
284 	if (dup_to_netobj(&rsii.out_handle, buf, len))
285 		goto out;
286 
287 	/* out_token */
288 	len = qword_get(&mesg, buf, mlen);
289 	status = -EINVAL;
290 	if (len < 0)
291 		goto out;
292 	status = -ENOMEM;
293 	if (dup_to_netobj(&rsii.out_token, buf, len))
294 		goto out;
295 	rsii.h.expiry_time = expiry;
296 	rsip = rsi_update(cd, &rsii, rsip);
297 	status = 0;
298 out:
299 	rsi_free(&rsii);
300 	if (rsip)
301 		cache_put(&rsip->h, cd);
302 	else
303 		status = -ENOMEM;
304 	return status;
305 }
306 
307 static const struct cache_detail rsi_cache_template = {
308 	.owner		= THIS_MODULE,
309 	.hash_size	= RSI_HASHMAX,
310 	.name           = "auth.rpcsec.init",
311 	.cache_put      = rsi_put,
312 	.cache_upcall	= rsi_upcall,
313 	.cache_request  = rsi_request,
314 	.cache_parse    = rsi_parse,
315 	.match		= rsi_match,
316 	.init		= rsi_init,
317 	.update		= update_rsi,
318 	.alloc		= rsi_alloc,
319 };
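
/*
 * Note: once registered (see rsi_cache_create_net() below), this cache
 * is serviced over the standard sunrpc cache channel files, typically
 * under /proc/net/rpc/auth.rpcsec.init/.  In the legacy (non-gss-proxy)
 * setup a user-space daemon such as rpc.svcgssd reads upcalls from the
 * channel, runs GSS_Accept_sec_context, and writes the result back as a
 * downcall.
 */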
320 
321 static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
322 {
323 	struct cache_head *ch;
324 	int hash = rsi_hash(item);
325 
326 	ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
327 	if (ch)
328 		return container_of(ch, struct rsi, h);
329 	else
330 		return NULL;
331 }
332 
333 static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old)
334 {
335 	struct cache_head *ch;
336 	int hash = rsi_hash(new);
337 
338 	ch = sunrpc_cache_update(cd, &new->h,
339 				 &old->h, hash);
340 	if (ch)
341 		return container_of(ch, struct rsi, h);
342 	else
343 		return NULL;
344 }
345 
346 
347 /*
348  * The rpcsec_context cache is used to store a context that is
349  * used in data exchange.
350  * The key is a context handle. The content is:
351  *  uid, gidlist, mechanism, service-set, mech-specific-data
352  */
353 
354 #define	RSC_HASHBITS	10
355 #define	RSC_HASHMAX	(1<<RSC_HASHBITS)
356 
357 #define GSS_SEQ_WIN	128
358 
359 struct gss_svc_seq_data {
360 	/* highest seq number seen so far: */
361 	u32			sd_max;
362 	/* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
363 	 * sd_win is nonzero iff sequence number i has been seen already: */
364 	unsigned long		sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
365 	spinlock_t		sd_lock;
366 };
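
/*
 * With GSS_SEQ_WIN == 128, sd_win is a 128-bit bitmap (two unsigned longs
 * on 64-bit builds, four on 32-bit); bit (i % GSS_SEQ_WIN) records whether
 * sequence number i within the current window has been seen.
 */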
367 
368 struct rsc {
369 	struct cache_head	h;
370 	struct xdr_netobj	handle;
371 	struct svc_cred		cred;
372 	struct gss_svc_seq_data	seqdata;
373 	struct gss_ctx		*mechctx;
374 	struct rcu_head		rcu_head;
375 };
376 
377 static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
378 static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item);
379 
380 static void rsc_free(struct rsc *rsci)
381 {
382 	kfree(rsci->handle.data);
383 	if (rsci->mechctx)
384 		gss_delete_sec_context(&rsci->mechctx);
385 	free_svc_cred(&rsci->cred);
386 }
387 
388 static void rsc_free_rcu(struct rcu_head *head)
389 {
390 	struct rsc *rsci = container_of(head, struct rsc, rcu_head);
391 
392 	kfree(rsci->handle.data);
393 	kfree(rsci);
394 }
395 
396 static void rsc_put(struct kref *ref)
397 {
398 	struct rsc *rsci = container_of(ref, struct rsc, h.ref);
399 
400 	if (rsci->mechctx)
401 		gss_delete_sec_context(&rsci->mechctx);
402 	free_svc_cred(&rsci->cred);
403 	call_rcu(&rsci->rcu_head, rsc_free_rcu);
404 }
405 
406 static inline int
407 rsc_hash(struct rsc *rsci)
408 {
409 	return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
410 }
411 
412 static int
413 rsc_match(struct cache_head *a, struct cache_head *b)
414 {
415 	struct rsc *new = container_of(a, struct rsc, h);
416 	struct rsc *tmp = container_of(b, struct rsc, h);
417 
418 	return netobj_equal(&new->handle, &tmp->handle);
419 }
420 
421 static void
422 rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
423 {
424 	struct rsc *new = container_of(cnew, struct rsc, h);
425 	struct rsc *tmp = container_of(ctmp, struct rsc, h);
426 
427 	new->handle.len = tmp->handle.len;
428 	tmp->handle.len = 0;
429 	new->handle.data = tmp->handle.data;
430 	tmp->handle.data = NULL;
431 	new->mechctx = NULL;
432 	init_svc_cred(&new->cred);
433 }
434 
435 static void
436 update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
437 {
438 	struct rsc *new = container_of(cnew, struct rsc, h);
439 	struct rsc *tmp = container_of(ctmp, struct rsc, h);
440 
441 	new->mechctx = tmp->mechctx;
442 	tmp->mechctx = NULL;
443 	memset(&new->seqdata, 0, sizeof(new->seqdata));
444 	spin_lock_init(&new->seqdata.sd_lock);
445 	new->cred = tmp->cred;
446 	init_svc_cred(&tmp->cred);
447 }
448 
449 static struct cache_head *
450 rsc_alloc(void)
451 {
452 	struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL);
453 	if (rsci)
454 		return &rsci->h;
455 	else
456 		return NULL;
457 }
458 
459 static int rsc_upcall(struct cache_detail *cd, struct cache_head *h)
460 {
461 	return -EINVAL;
462 }
463 
464 static int rsc_parse(struct cache_detail *cd,
465 		     char *mesg, int mlen)
466 {
467 	/* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
468 	char *buf = mesg;
469 	int id;
470 	int len, rv;
471 	struct rsc rsci, *rscp = NULL;
472 	time64_t expiry;
473 	int status = -EINVAL;
474 	struct gss_api_mech *gm = NULL;
475 
476 	memset(&rsci, 0, sizeof(rsci));
477 	/* context handle */
478 	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
480 	status = -ENOMEM;
481 	if (dup_to_netobj(&rsci.handle, buf, len))
482 		goto out;
483 
484 	rsci.h.flags = 0;
485 	/* expiry */
486 	status = get_expiry(&mesg, &expiry);
487 	if (status)
488 		goto out;
489 
490 	status = -EINVAL;
491 	rscp = rsc_lookup(cd, &rsci);
492 	if (!rscp)
493 		goto out;
494 
495 	/* uid, or NEGATIVE */
496 	rv = get_int(&mesg, &id);
497 	if (rv == -EINVAL)
498 		goto out;
499 	if (rv == -ENOENT)
500 		set_bit(CACHE_NEGATIVE, &rsci.h.flags);
501 	else {
502 		int N, i;
503 
504 		/*
		 * NOTE: we skip uid_valid()/gid_valid() checks here;
		 * instead, -1 ids are later mapped to the
		 * (export-specific) anonymous id by nfsd_setuser.
		 *
		 * (But supplementary gids get no such special
		 * treatment, so they are checked for validity here.)
511 		 */
512 		/* uid */
513 		rsci.cred.cr_uid = make_kuid(current_user_ns(), id);
514 
515 		/* gid */
516 		if (get_int(&mesg, &id))
517 			goto out;
518 		rsci.cred.cr_gid = make_kgid(current_user_ns(), id);
519 
520 		/* number of additional gid's */
521 		if (get_int(&mesg, &N))
522 			goto out;
523 		if (N < 0 || N > NGROUPS_MAX)
524 			goto out;
525 		status = -ENOMEM;
526 		rsci.cred.cr_group_info = groups_alloc(N);
527 		if (rsci.cred.cr_group_info == NULL)
528 			goto out;
529 
530 		/* gid's */
531 		status = -EINVAL;
		for (i = 0; i < N; i++) {
533 			kgid_t kgid;
534 			if (get_int(&mesg, &id))
535 				goto out;
536 			kgid = make_kgid(current_user_ns(), id);
537 			if (!gid_valid(kgid))
538 				goto out;
539 			rsci.cred.cr_group_info->gid[i] = kgid;
540 		}
541 		groups_sort(rsci.cred.cr_group_info);
542 
543 		/* mech name */
544 		len = qword_get(&mesg, buf, mlen);
545 		if (len < 0)
546 			goto out;
547 		gm = rsci.cred.cr_gss_mech = gss_mech_get_by_name(buf);
548 		status = -EOPNOTSUPP;
549 		if (!gm)
550 			goto out;
551 
552 		status = -EINVAL;
553 		/* mech-specific data: */
554 		len = qword_get(&mesg, buf, mlen);
555 		if (len < 0)
556 			goto out;
557 		status = gss_import_sec_context(buf, len, gm, &rsci.mechctx,
558 						NULL, GFP_KERNEL);
559 		if (status)
560 			goto out;
561 
562 		/* get client name */
563 		len = qword_get(&mesg, buf, mlen);
564 		if (len > 0) {
565 			rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
566 			if (!rsci.cred.cr_principal) {
567 				status = -ENOMEM;
568 				goto out;
569 			}
570 		}
571 
572 	}
573 	rsci.h.expiry_time = expiry;
574 	rscp = rsc_update(cd, &rsci, rscp);
575 	status = 0;
576 out:
577 	rsc_free(&rsci);
578 	if (rscp)
579 		cache_put(&rscp->h, cd);
580 	else
581 		status = -ENOMEM;
582 	return status;
583 }
584 
585 static const struct cache_detail rsc_cache_template = {
586 	.owner		= THIS_MODULE,
587 	.hash_size	= RSC_HASHMAX,
588 	.name		= "auth.rpcsec.context",
589 	.cache_put	= rsc_put,
590 	.cache_upcall	= rsc_upcall,
591 	.cache_parse	= rsc_parse,
592 	.match		= rsc_match,
593 	.init		= rsc_init,
594 	.update		= update_rsc,
595 	.alloc		= rsc_alloc,
596 };
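
/*
 * Unlike the rsi cache, the rsc cache never generates kernel-to-user
 * upcalls (rsc_upcall() simply returns -EINVAL).  Entries are created
 * either by a user-space downcall parsed in rsc_parse() (the legacy
 * path) or directly by gss_proxy_save_rsc() when gss-proxy is in use.
 */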
597 
598 static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
599 {
600 	struct cache_head *ch;
601 	int hash = rsc_hash(item);
602 
603 	ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
604 	if (ch)
605 		return container_of(ch, struct rsc, h);
606 	else
607 		return NULL;
608 }
609 
610 static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old)
611 {
612 	struct cache_head *ch;
613 	int hash = rsc_hash(new);
614 
615 	ch = sunrpc_cache_update(cd, &new->h,
616 				 &old->h, hash);
617 	if (ch)
618 		return container_of(ch, struct rsc, h);
619 	else
620 		return NULL;
621 }
622 
623 
624 static struct rsc *
625 gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
626 {
627 	struct rsc rsci;
628 	struct rsc *found;
629 
630 	memset(&rsci, 0, sizeof(rsci));
631 	if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
632 		return NULL;
633 	found = rsc_lookup(cd, &rsci);
634 	rsc_free(&rsci);
635 	if (!found)
636 		return NULL;
637 	if (cache_check(cd, &found->h, NULL))
638 		return NULL;
639 	return found;
640 }
641 
642 /**
643  * gss_check_seq_num - GSS sequence number window check
644  * @rqstp: RPC Call to use when reporting errors
645  * @rsci: cached GSS context state (updated on return)
646  * @seq_num: sequence number to check
647  *
648  * Implements sequence number algorithm as specified in
649  * RFC 2203, Section 5.3.3.1. "Context Management".
650  *
651  * Return values:
652  *   %true: @rqstp's GSS sequence number is inside the window
653  *   %false: @rqstp's GSS sequence number is outside the window
654  */
655 static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
656 			      u32 seq_num)
657 {
658 	struct gss_svc_seq_data *sd = &rsci->seqdata;
659 	bool result = false;
660 
661 	spin_lock(&sd->sd_lock);
662 	if (seq_num > sd->sd_max) {
663 		if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
664 			memset(sd->sd_win, 0, sizeof(sd->sd_win));
665 			sd->sd_max = seq_num;
666 		} else while (sd->sd_max < seq_num) {
667 			sd->sd_max++;
668 			__clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
669 		}
670 		__set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
671 		goto ok;
672 	} else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
673 		goto toolow;
674 	}
675 	if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
676 		goto alreadyseen;
677 
678 ok:
679 	result = true;
680 out:
681 	spin_unlock(&sd->sd_lock);
682 	return result;
683 
684 toolow:
685 	trace_rpcgss_svc_seqno_low(rqstp, seq_num,
686 				   sd->sd_max - GSS_SEQ_WIN,
687 				   sd->sd_max);
688 	goto out;
689 alreadyseen:
690 	trace_rpcgss_svc_seqno_seen(rqstp, seq_num);
691 	goto out;
692 }
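
/*
 * Worked example of the window check above, with GSS_SEQ_WIN == 128:
 * if sd_max is currently 1000, then sequence numbers 872 and below are
 * rejected as too low (872 + 128 <= 1000); numbers 873..1000 are each
 * accepted at most once, tracked by the sd_win bitmap; and any number
 * above 1000 advances sd_max, either bit by bit or, when the jump is a
 * full window or more (>= 1128), by resetting the bitmap outright.
 */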
693 
694 /*
695  * Decode and verify a Call's verifier field. For RPC_AUTH_GSS Calls,
 * the body of this field contains a variable-length checksum.
697  *
698  * GSS-specific auth_stat values are mandated by RFC 2203 Section
699  * 5.3.3.3.
700  */
701 static int
702 svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
703 			  __be32 *rpcstart, struct rpc_gss_wire_cred *gc)
704 {
705 	struct xdr_stream	*xdr = &rqstp->rq_arg_stream;
706 	struct gss_ctx		*ctx_id = rsci->mechctx;
707 	u32			flavor, maj_stat;
708 	struct xdr_buf		rpchdr;
709 	struct xdr_netobj	checksum;
710 	struct kvec		iov;
711 
712 	/*
713 	 * Compute the checksum of the incoming Call from the
714 	 * XID field to credential field:
715 	 */
716 	iov.iov_base = rpcstart;
717 	iov.iov_len = (u8 *)xdr->p - (u8 *)rpcstart;
718 	xdr_buf_from_iov(&iov, &rpchdr);
719 
720 	/* Call's verf field: */
721 	if (xdr_stream_decode_opaque_auth(xdr, &flavor,
722 					  (void **)&checksum.data,
723 					  &checksum.len) < 0) {
724 		rqstp->rq_auth_stat = rpc_autherr_badverf;
725 		return SVC_DENIED;
726 	}
727 	if (flavor != RPC_AUTH_GSS) {
728 		rqstp->rq_auth_stat = rpc_autherr_badverf;
729 		return SVC_DENIED;
730 	}
731 
732 	if (rqstp->rq_deferred)
733 		return SVC_OK;
734 	maj_stat = gss_verify_mic(ctx_id, &rpchdr, &checksum);
735 	if (maj_stat != GSS_S_COMPLETE) {
736 		trace_rpcgss_svc_mic(rqstp, maj_stat);
737 		rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
738 		return SVC_DENIED;
739 	}
740 
741 	if (gc->gc_seq > MAXSEQ) {
742 		trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq);
743 		rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
744 		return SVC_DENIED;
745 	}
746 	if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq))
747 		return SVC_DROP;
748 	return SVC_OK;
749 }
750 
751 /*
752  * Construct and encode a Reply's verifier field. The verifier's body
753  * field contains a variable-length checksum of the GSS sequence
754  * number.
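 *
 * Concretely, as assembled below: the verifier's flavor is RPC_AUTH_GSS
 * and its body is the GSS MIC computed over the XDR-encoded 32-bit
 * sequence number held in gsd_seq_num, with the MIC itself staged in
 * the per-request gsd_scratch buffer.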
755  */
756 static bool
757 svcauth_gss_encode_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
758 {
759 	struct gss_svc_data	*gsd = rqstp->rq_auth_data;
760 	u32			maj_stat;
761 	struct xdr_buf		verf_data;
762 	struct xdr_netobj	checksum;
763 	struct kvec		iov;
764 
765 	gsd->gsd_seq_num = cpu_to_be32(seq);
766 	iov.iov_base = &gsd->gsd_seq_num;
767 	iov.iov_len = XDR_UNIT;
768 	xdr_buf_from_iov(&iov, &verf_data);
769 
770 	checksum.data = gsd->gsd_scratch;
771 	maj_stat = gss_get_mic(ctx_id, &verf_data, &checksum);
772 	if (maj_stat != GSS_S_COMPLETE)
773 		goto bad_mic;
774 
775 	return xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream, RPC_AUTH_GSS,
776 					     checksum.data, checksum.len) > 0;
777 
778 bad_mic:
779 	trace_rpcgss_svc_get_mic(rqstp, maj_stat);
780 	return false;
781 }
782 
783 struct gss_domain {
784 	struct auth_domain	h;
785 	u32			pseudoflavor;
786 };
787 
788 static struct auth_domain *
789 find_gss_auth_domain(struct gss_ctx *ctx, u32 svc)
790 {
791 	char *name;
792 
793 	name = gss_service_to_auth_domain_name(ctx->mech_type, svc);
794 	if (!name)
795 		return NULL;
796 	return auth_domain_find(name);
797 }
798 
799 static struct auth_ops svcauthops_gss;
800 
801 u32 svcauth_gss_flavor(struct auth_domain *dom)
802 {
803 	struct gss_domain *gd = container_of(dom, struct gss_domain, h);
804 
805 	return gd->pseudoflavor;
806 }
807 
808 EXPORT_SYMBOL_GPL(svcauth_gss_flavor);
809 
810 struct auth_domain *
811 svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name)
812 {
813 	struct gss_domain	*new;
814 	struct auth_domain	*test;
815 	int			stat = -ENOMEM;
816 
817 	new = kmalloc(sizeof(*new), GFP_KERNEL);
818 	if (!new)
819 		goto out;
820 	kref_init(&new->h.ref);
821 	new->h.name = kstrdup(name, GFP_KERNEL);
822 	if (!new->h.name)
823 		goto out_free_dom;
824 	new->h.flavour = &svcauthops_gss;
825 	new->pseudoflavor = pseudoflavor;
826 
827 	test = auth_domain_lookup(name, &new->h);
828 	if (test != &new->h) {
829 		pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n",
830 			name);
831 		stat = -EADDRINUSE;
832 		auth_domain_put(test);
833 		goto out_free_name;
834 	}
835 	return test;
836 
837 out_free_name:
838 	kfree(new->h.name);
839 out_free_dom:
840 	kfree(new);
841 out:
842 	return ERR_PTR(stat);
843 }
844 EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor);
845 
846 /*
847  * RFC 2203, Section 5.3.2.2
848  *
849  *	struct rpc_gss_integ_data {
850  *		opaque databody_integ<>;
851  *		opaque checksum<>;
852  *	};
853  *
854  *	struct rpc_gss_data_t {
855  *		unsigned int seq_num;
856  *		proc_req_arg_t arg;
857  *	};
858  */
859 static noinline_for_stack int
860 svcauth_gss_unwrap_integ(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
861 {
862 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
863 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
864 	u32 len, offset, seq_num, maj_stat;
865 	struct xdr_buf *buf = xdr->buf;
866 	struct xdr_buf databody_integ;
867 	struct xdr_netobj checksum;
868 
869 	/* NFS READ normally uses splice to send data in-place. However
870 	 * the data in cache can change after the reply's MIC is computed
871 	 * but before the RPC reply is sent. To prevent the client from
872 	 * rejecting the server-computed MIC in this somewhat rare case,
873 	 * do not use splice with the GSS integrity service.
874 	 */
875 	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
876 
877 	/* Did we already verify the signature on the original pass through? */
878 	if (rqstp->rq_deferred)
879 		return 0;
880 
881 	if (xdr_stream_decode_u32(xdr, &len) < 0)
882 		goto unwrap_failed;
883 	if (len & 3)
884 		goto unwrap_failed;
885 	offset = xdr_stream_pos(xdr);
886 	if (xdr_buf_subsegment(buf, &databody_integ, offset, len))
887 		goto unwrap_failed;
888 
889 	/*
890 	 * The xdr_stream now points to the @seq_num field. The next
891 	 * XDR data item is the @arg field, which contains the clear
892 	 * text RPC program payload. The checksum, which follows the
893 	 * @arg field, is located and decoded without updating the
894 	 * xdr_stream.
895 	 */
896 
897 	offset += len;
898 	if (xdr_decode_word(buf, offset, &checksum.len))
899 		goto unwrap_failed;
900 	if (checksum.len > sizeof(gsd->gsd_scratch))
901 		goto unwrap_failed;
902 	checksum.data = gsd->gsd_scratch;
903 	if (read_bytes_from_xdr_buf(buf, offset + XDR_UNIT, checksum.data,
904 				    checksum.len))
905 		goto unwrap_failed;
906 
907 	maj_stat = gss_verify_mic(ctx, &databody_integ, &checksum);
908 	if (maj_stat != GSS_S_COMPLETE)
909 		goto bad_mic;
910 
911 	/* The received seqno is protected by the checksum. */
912 	if (xdr_stream_decode_u32(xdr, &seq_num) < 0)
913 		goto unwrap_failed;
914 	if (seq_num != seq)
915 		goto bad_seqno;
916 
917 	xdr_truncate_decode(xdr, XDR_UNIT + checksum.len);
918 	return 0;
919 
920 unwrap_failed:
921 	trace_rpcgss_svc_unwrap_failed(rqstp);
922 	return -EINVAL;
923 bad_seqno:
924 	trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num);
925 	return -EINVAL;
926 bad_mic:
927 	trace_rpcgss_svc_mic(rqstp, maj_stat);
928 	return -EINVAL;
929 }
930 
931 /*
932  * RFC 2203, Section 5.3.2.3
933  *
934  *	struct rpc_gss_priv_data {
935  *		opaque databody_priv<>
936  *	};
937  *
938  *	struct rpc_gss_data_t {
939  *		unsigned int seq_num;
940  *		proc_req_arg_t arg;
941  *	};
942  */
943 static noinline_for_stack int
944 svcauth_gss_unwrap_priv(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx)
945 {
946 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
947 	u32 len, maj_stat, seq_num, offset;
948 	struct xdr_buf *buf = xdr->buf;
949 	unsigned int saved_len;
950 
951 	clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
952 
953 	if (xdr_stream_decode_u32(xdr, &len) < 0)
954 		goto unwrap_failed;
955 	if (rqstp->rq_deferred) {
956 		/* Already decrypted last time through! The sequence number
957 		 * check at out_seq is unnecessary but harmless: */
958 		goto out_seq;
959 	}
960 	if (len > xdr_stream_remaining(xdr))
961 		goto unwrap_failed;
962 	offset = xdr_stream_pos(xdr);
963 
964 	saved_len = buf->len;
965 	maj_stat = gss_unwrap(ctx, offset, offset + len, buf);
966 	if (maj_stat != GSS_S_COMPLETE)
967 		goto bad_unwrap;
968 	xdr->nwords -= XDR_QUADLEN(saved_len - buf->len);
969 
970 out_seq:
971 	/* gss_unwrap() decrypted the sequence number. */
972 	if (xdr_stream_decode_u32(xdr, &seq_num) < 0)
973 		goto unwrap_failed;
974 	if (seq_num != seq)
975 		goto bad_seqno;
976 	return 0;
977 
978 unwrap_failed:
979 	trace_rpcgss_svc_unwrap_failed(rqstp);
980 	return -EINVAL;
981 bad_seqno:
982 	trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num);
983 	return -EINVAL;
984 bad_unwrap:
985 	trace_rpcgss_svc_unwrap(rqstp, maj_stat);
986 	return -EINVAL;
987 }
988 
989 static enum svc_auth_status
990 svcauth_gss_set_client(struct svc_rqst *rqstp)
991 {
992 	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
993 	struct rsc *rsci = svcdata->rsci;
994 	struct rpc_gss_wire_cred *gc = &svcdata->clcred;
995 	int stat;
996 
997 	rqstp->rq_auth_stat = rpc_autherr_badcred;
998 
999 	/*
1000 	 * A gss export can be specified either by:
1001 	 * 	export	*(sec=krb5,rw)
1002 	 * or by
1003 	 * 	export gss/krb5(rw)
	 * The latter is deprecated, but for backwards-compatibility reasons
	 * the nfsd code will still fall back on trying it if the former
	 * doesn't work, so we try to make both available to nfsd below.
1007 	 */
1008 	rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc);
1009 	if (rqstp->rq_gssclient == NULL)
1010 		return SVC_DENIED;
1011 	stat = svcauth_unix_set_client(rqstp);
1012 	if (stat == SVC_DROP || stat == SVC_CLOSE)
1013 		return stat;
1014 
1015 	rqstp->rq_auth_stat = rpc_auth_ok;
1016 	return SVC_OK;
1017 }
1018 
1019 static bool
1020 svcauth_gss_proc_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
1021 			   struct xdr_netobj *out_handle, int *major_status,
1022 			   u32 seq_num)
1023 {
1024 	struct xdr_stream *xdr = &rqstp->rq_res_stream;
1025 	struct rsc *rsci;
1026 	bool rc;
1027 
1028 	if (*major_status != GSS_S_COMPLETE)
1029 		goto null_verifier;
1030 	rsci = gss_svc_searchbyctx(cd, out_handle);
1031 	if (rsci == NULL) {
1032 		*major_status = GSS_S_NO_CONTEXT;
1033 		goto null_verifier;
1034 	}
1035 
1036 	rc = svcauth_gss_encode_verf(rqstp, rsci->mechctx, seq_num);
1037 	cache_put(&rsci->h, cd);
1038 	return rc;
1039 
1040 null_verifier:
1041 	return xdr_stream_encode_opaque_auth(xdr, RPC_AUTH_NULL, NULL, 0) > 0;
1042 }
1043 
1044 static void gss_free_in_token_pages(struct gssp_in_token *in_token)
1045 {
1046 	int i;
1047 
1048 	i = 0;
1049 	while (in_token->pages[i])
1050 		put_page(in_token->pages[i++]);
1051 	kfree(in_token->pages);
1052 	in_token->pages = NULL;
1053 }
1054 
1055 static int gss_read_proxy_verf(struct svc_rqst *rqstp,
1056 			       struct rpc_gss_wire_cred *gc,
1057 			       struct xdr_netobj *in_handle,
1058 			       struct gssp_in_token *in_token)
1059 {
1060 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1061 	unsigned int length, pgto_offs, pgfrom_offs;
1062 	int pages, i, pgto, pgfrom;
1063 	size_t to_offs, from_offs;
1064 	u32 inlen;
1065 
1066 	if (dup_netobj(in_handle, &gc->gc_ctx))
1067 		return SVC_CLOSE;
1068 
1069 	/*
1070 	 *  RFC 2203 Section 5.2.2
1071 	 *
1072 	 *	struct rpc_gss_init_arg {
1073 	 *		opaque gss_token<>;
1074 	 *	};
1075 	 */
1076 	if (xdr_stream_decode_u32(xdr, &inlen) < 0)
1077 		goto out_denied_free;
1078 	if (inlen > xdr_stream_remaining(xdr))
1079 		goto out_denied_free;
1080 
1081 	pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
1082 	in_token->pages = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
1083 	if (!in_token->pages)
1084 		goto out_denied_free;
1085 	in_token->page_base = 0;
1086 	in_token->page_len = inlen;
1087 	for (i = 0; i < pages; i++) {
1088 		in_token->pages[i] = alloc_page(GFP_KERNEL);
1089 		if (!in_token->pages[i]) {
1090 			gss_free_in_token_pages(in_token);
1091 			goto out_denied_free;
1092 		}
1093 	}
1094 
1095 	length = min_t(unsigned int, inlen, (char *)xdr->end - (char *)xdr->p);
1096 	memcpy(page_address(in_token->pages[0]), xdr->p, length);
1097 	inlen -= length;
1098 
1099 	to_offs = length;
1100 	from_offs = rqstp->rq_arg.page_base;
1101 	while (inlen) {
1102 		pgto = to_offs >> PAGE_SHIFT;
1103 		pgfrom = from_offs >> PAGE_SHIFT;
1104 		pgto_offs = to_offs & ~PAGE_MASK;
1105 		pgfrom_offs = from_offs & ~PAGE_MASK;
1106 
1107 		length = min_t(unsigned int, inlen,
1108 			 min_t(unsigned int, PAGE_SIZE - pgto_offs,
1109 			       PAGE_SIZE - pgfrom_offs));
1110 		memcpy(page_address(in_token->pages[pgto]) + pgto_offs,
1111 		       page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs,
1112 		       length);
1113 
1114 		to_offs += length;
1115 		from_offs += length;
1116 		inlen -= length;
1117 	}
1118 	return 0;
1119 
1120 out_denied_free:
1121 	kfree(in_handle->data);
1122 	return SVC_DENIED;
1123 }
1124 
1125 /*
1126  * RFC 2203, Section 5.2.3.1.
1127  *
1128  *	struct rpc_gss_init_res {
1129  *		opaque handle<>;
1130  *		unsigned int gss_major;
1131  *		unsigned int gss_minor;
1132  *		unsigned int seq_window;
1133  *		opaque gss_token<>;
1134  *	};
1135  */
1136 static bool
1137 svcxdr_encode_gss_init_res(struct xdr_stream *xdr,
1138 			   struct xdr_netobj *handle,
1139 			   struct xdr_netobj *gss_token,
1140 			   unsigned int major_status,
1141 			   unsigned int minor_status, u32 seq_num)
1142 {
1143 	if (xdr_stream_encode_opaque(xdr, handle->data, handle->len) < 0)
1144 		return false;
1145 	if (xdr_stream_encode_u32(xdr, major_status) < 0)
1146 		return false;
1147 	if (xdr_stream_encode_u32(xdr, minor_status) < 0)
1148 		return false;
1149 	if (xdr_stream_encode_u32(xdr, seq_num) < 0)
1150 		return false;
1151 	if (xdr_stream_encode_opaque(xdr, gss_token->data, gss_token->len) < 0)
1152 		return false;
1153 	return true;
1154 }
1155 
1156 /*
1157  * Having read the cred already and found we're in the context
1158  * initiation case, read the verifier and initiate (or check the results
1159  * of) upcalls to userspace for help with context initiation.  If
1160  * the upcall results are available, write the verifier and result.
1161  * Otherwise, drop the request pending an answer to the upcall.
1162  */
1163 static int
1164 svcauth_gss_legacy_init(struct svc_rqst *rqstp,
1165 			struct rpc_gss_wire_cred *gc)
1166 {
1167 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1168 	struct rsi *rsip, rsikey;
1169 	__be32 *p;
1170 	u32 len;
1171 	int ret;
1172 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
1173 
1174 	memset(&rsikey, 0, sizeof(rsikey));
1175 	if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
1176 		return SVC_CLOSE;
1177 
1178 	/*
1179 	 *  RFC 2203 Section 5.2.2
1180 	 *
1181 	 *	struct rpc_gss_init_arg {
1182 	 *		opaque gss_token<>;
1183 	 *	};
1184 	 */
1185 	if (xdr_stream_decode_u32(xdr, &len) < 0) {
1186 		kfree(rsikey.in_handle.data);
1187 		return SVC_DENIED;
1188 	}
1189 	p = xdr_inline_decode(xdr, len);
1190 	if (!p) {
1191 		kfree(rsikey.in_handle.data);
1192 		return SVC_DENIED;
1193 	}
1194 	rsikey.in_token.data = kmalloc(len, GFP_KERNEL);
1195 	if (ZERO_OR_NULL_PTR(rsikey.in_token.data)) {
1196 		kfree(rsikey.in_handle.data);
1197 		return SVC_CLOSE;
1198 	}
1199 	memcpy(rsikey.in_token.data, p, len);
1200 	rsikey.in_token.len = len;
1201 
1202 	/* Perform upcall, or find upcall result: */
1203 	rsip = rsi_lookup(sn->rsi_cache, &rsikey);
1204 	rsi_free(&rsikey);
1205 	if (!rsip)
1206 		return SVC_CLOSE;
1207 	if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
1208 		/* No upcall result: */
1209 		return SVC_CLOSE;
1210 
1211 	ret = SVC_CLOSE;
1212 	if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &rsip->out_handle,
1213 					&rsip->major_status, GSS_SEQ_WIN))
1214 		goto out;
1215 	if (!svcxdr_set_accept_stat(rqstp))
1216 		goto out;
1217 	if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &rsip->out_handle,
1218 					&rsip->out_token, rsip->major_status,
1219 					rsip->minor_status, GSS_SEQ_WIN))
1220 		goto out;
1221 
1222 	ret = SVC_COMPLETE;
1223 out:
1224 	cache_put(&rsip->h, sn->rsi_cache);
1225 	return ret;
1226 }
1227 
1228 static int gss_proxy_save_rsc(struct cache_detail *cd,
1229 				struct gssp_upcall_data *ud,
1230 				uint64_t *handle)
1231 {
1232 	struct rsc rsci, *rscp = NULL;
1233 	static atomic64_t ctxhctr;
1234 	long long ctxh;
1235 	struct gss_api_mech *gm = NULL;
1236 	time64_t expiry;
1237 	int status;
1238 
1239 	memset(&rsci, 0, sizeof(rsci));
1240 	/* context handle */
1241 	status = -ENOMEM;
	/* The handle just needs to be a unique id, so use a
	 * static counter. */
1244 	ctxh = atomic64_inc_return(&ctxhctr);
1245 
1246 	/* make a copy for the caller */
1247 	*handle = ctxh;
1248 
1249 	/* make a copy for the rsc cache */
1250 	if (dup_to_netobj(&rsci.handle, (char *)handle, sizeof(uint64_t)))
1251 		goto out;
1252 	rscp = rsc_lookup(cd, &rsci);
1253 	if (!rscp)
1254 		goto out;
1255 
1256 	/* creds */
1257 	if (!ud->found_creds) {
		/* userspace seems buggy; we should always get at least a
		 * mapping to nobody */
1260 		goto out;
1261 	} else {
1262 		struct timespec64 boot;
1263 
1264 		/* steal creds */
1265 		rsci.cred = ud->creds;
1266 		memset(&ud->creds, 0, sizeof(struct svc_cred));
1267 
1268 		status = -EOPNOTSUPP;
1269 		/* get mech handle from OID */
1270 		gm = gss_mech_get_by_OID(&ud->mech_oid);
1271 		if (!gm)
1272 			goto out;
1273 		rsci.cred.cr_gss_mech = gm;
1274 
1275 		status = -EINVAL;
1276 		/* mech-specific data: */
1277 		status = gss_import_sec_context(ud->out_handle.data,
1278 						ud->out_handle.len,
1279 						gm, &rsci.mechctx,
1280 						&expiry, GFP_KERNEL);
1281 		if (status)
1282 			goto out;
1283 
1284 		getboottime64(&boot);
1285 		expiry -= boot.tv_sec;
1286 	}
1287 
1288 	rsci.h.expiry_time = expiry;
1289 	rscp = rsc_update(cd, &rsci, rscp);
1290 	status = 0;
1291 out:
1292 	rsc_free(&rsci);
1293 	if (rscp)
1294 		cache_put(&rscp->h, cd);
1295 	else
1296 		status = -ENOMEM;
1297 	return status;
1298 }
1299 
1300 static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
1301 				  struct rpc_gss_wire_cred *gc)
1302 {
1303 	struct xdr_netobj cli_handle;
1304 	struct gssp_upcall_data ud;
1305 	uint64_t handle;
1306 	int status;
1307 	int ret;
1308 	struct net *net = SVC_NET(rqstp);
1309 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1310 
1311 	memset(&ud, 0, sizeof(ud));
1312 	ret = gss_read_proxy_verf(rqstp, gc, &ud.in_handle, &ud.in_token);
1313 	if (ret)
1314 		return ret;
1315 
1316 	ret = SVC_CLOSE;
1317 
1318 	/* Perform synchronous upcall to gss-proxy */
1319 	status = gssp_accept_sec_context_upcall(net, &ud);
1320 	if (status)
1321 		goto out;
1322 
1323 	trace_rpcgss_svc_accept_upcall(rqstp, ud.major_status, ud.minor_status);
1324 
1325 	switch (ud.major_status) {
1326 	case GSS_S_CONTINUE_NEEDED:
1327 		cli_handle = ud.out_handle;
1328 		break;
1329 	case GSS_S_COMPLETE:
1330 		status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
1331 		if (status)
1332 			goto out;
1333 		cli_handle.data = (u8 *)&handle;
1334 		cli_handle.len = sizeof(handle);
1335 		break;
1336 	default:
1337 		goto out;
1338 	}
1339 
1340 	if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &cli_handle,
1341 					&ud.major_status, GSS_SEQ_WIN))
1342 		goto out;
1343 	if (!svcxdr_set_accept_stat(rqstp))
1344 		goto out;
1345 	if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &cli_handle,
1346 					&ud.out_token, ud.major_status,
1347 					ud.minor_status, GSS_SEQ_WIN))
1348 		goto out;
1349 
1350 	ret = SVC_COMPLETE;
1351 out:
1352 	gss_free_in_token_pages(&ud.in_token);
1353 	gssp_free_upcall_data(&ud);
1354 	return ret;
1355 }
1356 
1357 /*
1358  * Try to set the sn->use_gss_proxy variable to a new value. We only allow
1359  * it to be changed if it's currently undefined (-1). If it's any other value
1360  * then return -EBUSY unless the type wouldn't have changed anyway.
1361  */
1362 static int set_gss_proxy(struct net *net, int type)
1363 {
1364 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1365 	int ret;
1366 
1367 	WARN_ON_ONCE(type != 0 && type != 1);
1368 	ret = cmpxchg(&sn->use_gss_proxy, -1, type);
1369 	if (ret != -1 && ret != type)
1370 		return -EBUSY;
1371 	return 0;
1372 }
1373 
1374 static bool use_gss_proxy(struct net *net)
1375 {
1376 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1377 
1378 	/* If use_gss_proxy is still undefined, then try to disable it */
1379 	if (sn->use_gss_proxy == -1)
1380 		set_gss_proxy(net, 0);
1381 	return sn->use_gss_proxy;
1382 }
1383 
1384 static noinline_for_stack int
1385 svcauth_gss_proc_init(struct svc_rqst *rqstp, struct rpc_gss_wire_cred *gc)
1386 {
1387 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
1388 	u32 flavor, len;
1389 	void *body;
1390 
1391 	/* Call's verf field: */
1392 	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
1393 		return SVC_GARBAGE;
1394 	if (flavor != RPC_AUTH_NULL || len != 0) {
1395 		rqstp->rq_auth_stat = rpc_autherr_badverf;
1396 		return SVC_DENIED;
1397 	}
1398 
1399 	if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0) {
1400 		rqstp->rq_auth_stat = rpc_autherr_badcred;
1401 		return SVC_DENIED;
1402 	}
1403 
1404 	if (!use_gss_proxy(SVC_NET(rqstp)))
1405 		return svcauth_gss_legacy_init(rqstp, gc);
1406 	return svcauth_gss_proxy_init(rqstp, gc);
1407 }
1408 
1409 #ifdef CONFIG_PROC_FS
1410 
1411 static ssize_t write_gssp(struct file *file, const char __user *buf,
1412 			 size_t count, loff_t *ppos)
1413 {
1414 	struct net *net = pde_data(file_inode(file));
1415 	char tbuf[20];
1416 	unsigned long i;
1417 	int res;
1418 
1419 	if (*ppos || count > sizeof(tbuf)-1)
1420 		return -EINVAL;
1421 	if (copy_from_user(tbuf, buf, count))
1422 		return -EFAULT;
1423 
1424 	tbuf[count] = 0;
1425 	res = kstrtoul(tbuf, 0, &i);
1426 	if (res)
1427 		return res;
1428 	if (i != 1)
1429 		return -EINVAL;
1430 	res = set_gssp_clnt(net);
1431 	if (res)
1432 		return res;
1433 	res = set_gss_proxy(net, 1);
1434 	if (res)
1435 		return res;
1436 	return count;
1437 }
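
/*
 * Typical administrative usage (illustrative; the exact path depends on
 * where the sunrpc proc directory lives): the gss-proxy daemon writes
 * "1" here once it is ready to service upcalls, e.g.
 *
 *	echo 1 > /proc/net/rpc/use-gss-proxy
 *
 * Reads report the current state: -1 (undefined), 0 (disabled), or
 * 1 (enabled).
 */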
1438 
1439 static ssize_t read_gssp(struct file *file, char __user *buf,
1440 			 size_t count, loff_t *ppos)
1441 {
1442 	struct net *net = pde_data(file_inode(file));
1443 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1444 	unsigned long p = *ppos;
1445 	char tbuf[10];
1446 	size_t len;
1447 
1448 	snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy);
1449 	len = strlen(tbuf);
1450 	if (p >= len)
1451 		return 0;
1452 	len -= p;
1453 	if (len > count)
1454 		len = count;
1455 	if (copy_to_user(buf, (void *)(tbuf+p), len))
1456 		return -EFAULT;
1457 	*ppos += len;
1458 	return len;
1459 }
1460 
1461 static const struct proc_ops use_gss_proxy_proc_ops = {
1462 	.proc_open	= nonseekable_open,
1463 	.proc_write	= write_gssp,
1464 	.proc_read	= read_gssp,
1465 };
1466 
1467 static int create_use_gss_proxy_proc_entry(struct net *net)
1468 {
1469 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1470 	struct proc_dir_entry **p = &sn->use_gssp_proc;
1471 
1472 	sn->use_gss_proxy = -1;
1473 	*p = proc_create_data("use-gss-proxy", S_IFREG | 0600,
1474 			      sn->proc_net_rpc,
1475 			      &use_gss_proxy_proc_ops, net);
1476 	if (!*p)
1477 		return -ENOMEM;
1478 	init_gssp_clnt(sn);
1479 	return 0;
1480 }
1481 
1482 static void destroy_use_gss_proxy_proc_entry(struct net *net)
1483 {
1484 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1485 
1486 	if (sn->use_gssp_proc) {
1487 		remove_proc_entry("use-gss-proxy", sn->proc_net_rpc);
1488 		clear_gssp_clnt(sn);
1489 	}
1490 }
1491 
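/*
 * The hard-coded OID below is the encoded Kerberos v5 GSS mechanism OID
 * (1.2.840.113554.1.2.2); this proc file simply exposes that mechanism's
 * gm_upcall_enctypes string to user space.
 */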
1492 static ssize_t read_gss_krb5_enctypes(struct file *file, char __user *buf,
1493 				      size_t count, loff_t *ppos)
1494 {
1495 	struct rpcsec_gss_oid oid = {
1496 		.len	= 9,
1497 		.data	= "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02",
1498 	};
1499 	struct gss_api_mech *mech;
1500 	ssize_t ret;
1501 
1502 	mech = gss_mech_get_by_OID(&oid);
1503 	if (!mech)
1504 		return 0;
1505 	if (!mech->gm_upcall_enctypes) {
1506 		gss_mech_put(mech);
1507 		return 0;
1508 	}
1509 
1510 	ret = simple_read_from_buffer(buf, count, ppos,
1511 				      mech->gm_upcall_enctypes,
1512 				      strlen(mech->gm_upcall_enctypes));
1513 	gss_mech_put(mech);
1514 	return ret;
1515 }
1516 
1517 static const struct proc_ops gss_krb5_enctypes_proc_ops = {
1518 	.proc_open	= nonseekable_open,
1519 	.proc_read	= read_gss_krb5_enctypes,
1520 };
1521 
1522 static int create_krb5_enctypes_proc_entry(struct net *net)
1523 {
1524 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1525 
1526 	sn->gss_krb5_enctypes =
1527 		proc_create_data("gss_krb5_enctypes", S_IFREG | 0444,
1528 				 sn->proc_net_rpc, &gss_krb5_enctypes_proc_ops,
1529 				 net);
1530 	return sn->gss_krb5_enctypes ? 0 : -ENOMEM;
1531 }
1532 
1533 static void destroy_krb5_enctypes_proc_entry(struct net *net)
1534 {
1535 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1536 
1537 	if (sn->gss_krb5_enctypes)
1538 		remove_proc_entry("gss_krb5_enctypes", sn->proc_net_rpc);
1539 }
1540 
1541 #else /* CONFIG_PROC_FS */
1542 
1543 static int create_use_gss_proxy_proc_entry(struct net *net)
1544 {
1545 	return 0;
1546 }
1547 
1548 static void destroy_use_gss_proxy_proc_entry(struct net *net) {}
1549 
1550 static int create_krb5_enctypes_proc_entry(struct net *net)
1551 {
1552 	return 0;
1553 }
1554 
1555 static void destroy_krb5_enctypes_proc_entry(struct net *net) {}
1556 
1557 #endif /* CONFIG_PROC_FS */
1558 
1559 /*
1560  * The Call's credential body should contain a struct rpc_gss_cred_t.
1561  *
1562  * RFC 2203 Section 5
1563  *
1564  *	struct rpc_gss_cred_t {
1565  *		union switch (unsigned int version) {
1566  *		case RPCSEC_GSS_VERS_1:
1567  *			struct {
1568  *				rpc_gss_proc_t gss_proc;
1569  *				unsigned int seq_num;
1570  *				rpc_gss_service_t service;
1571  *				opaque handle<>;
1572  *			} rpc_gss_cred_vers_1_t;
1573  *		}
1574  *	};
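 *
 * As decoded below, a well-formed credential body is exactly five XDR
 * words (version, gss_proc, seq_num, service, and the handle's length)
 * followed by the XDR-padded handle bytes; anything else is rejected.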
1575  */
1576 static bool
1577 svcauth_gss_decode_credbody(struct xdr_stream *xdr,
1578 			    struct rpc_gss_wire_cred *gc,
1579 			    __be32 **rpcstart)
1580 {
1581 	ssize_t handle_len;
1582 	u32 body_len;
1583 	__be32 *p;
1584 
1585 	p = xdr_inline_decode(xdr, XDR_UNIT);
1586 	if (!p)
1587 		return false;
1588 	/*
1589 	 * start of rpc packet is 7 u32's back from here:
1590 	 * xid direction rpcversion prog vers proc flavour
1591 	 */
1592 	*rpcstart = p - 7;
1593 	body_len = be32_to_cpup(p);
1594 	if (body_len > RPC_MAX_AUTH_SIZE)
1595 		return false;
1596 
1597 	/* struct rpc_gss_cred_t */
1598 	if (xdr_stream_decode_u32(xdr, &gc->gc_v) < 0)
1599 		return false;
1600 	if (xdr_stream_decode_u32(xdr, &gc->gc_proc) < 0)
1601 		return false;
1602 	if (xdr_stream_decode_u32(xdr, &gc->gc_seq) < 0)
1603 		return false;
1604 	if (xdr_stream_decode_u32(xdr, &gc->gc_svc) < 0)
1605 		return false;
1606 	handle_len = xdr_stream_decode_opaque_inline(xdr,
1607 						     (void **)&gc->gc_ctx.data,
1608 						     body_len);
1609 	if (handle_len < 0)
1610 		return false;
1611 	if (body_len != XDR_UNIT * 5 + xdr_align_size(handle_len))
1612 		return false;
1613 
1614 	gc->gc_ctx.len = handle_len;
1615 	return true;
1616 }
1617 
1618 /**
1619  * svcauth_gss_accept - Decode and validate incoming RPC_AUTH_GSS credential
1620  * @rqstp: RPC transaction
1621  *
1622  * Return values:
1623  *   %SVC_OK: Success
1624  *   %SVC_COMPLETE: GSS context lifetime event
1625  *   %SVC_DENIED: Credential or verifier is not valid
1626  *   %SVC_GARBAGE: Failed to decode credential or verifier
1627  *   %SVC_CLOSE: Temporary failure
1628  *
1629  * The rqstp->rq_auth_stat field is also set (see RFCs 2203 and 5531).
1630  */
1631 static enum svc_auth_status
1632 svcauth_gss_accept(struct svc_rqst *rqstp)
1633 {
1634 	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
1635 	__be32		*rpcstart;
1636 	struct rpc_gss_wire_cred *gc;
1637 	struct rsc	*rsci = NULL;
1638 	int		ret;
1639 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
1640 
1641 	rqstp->rq_auth_stat = rpc_autherr_badcred;
1642 	if (!svcdata)
1643 		svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
1644 	if (!svcdata)
1645 		goto auth_err;
1646 	rqstp->rq_auth_data = svcdata;
1647 	svcdata->gsd_databody_offset = 0;
1648 	svcdata->rsci = NULL;
1649 	gc = &svcdata->clcred;
1650 
1651 	if (!svcauth_gss_decode_credbody(&rqstp->rq_arg_stream, gc, &rpcstart))
1652 		goto auth_err;
1653 	if (gc->gc_v != RPC_GSS_VERSION)
1654 		goto auth_err;
1655 
1656 	switch (gc->gc_proc) {
1657 	case RPC_GSS_PROC_INIT:
1658 	case RPC_GSS_PROC_CONTINUE_INIT:
1659 		if (rqstp->rq_proc != 0)
1660 			goto auth_err;
1661 		return svcauth_gss_proc_init(rqstp, gc);
1662 	case RPC_GSS_PROC_DESTROY:
1663 		if (rqstp->rq_proc != 0)
1664 			goto auth_err;
1665 		fallthrough;
1666 	case RPC_GSS_PROC_DATA:
1667 		rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
1668 		rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
1669 		if (!rsci)
1670 			goto auth_err;
1671 		switch (svcauth_gss_verify_header(rqstp, rsci, rpcstart, gc)) {
1672 		case SVC_OK:
1673 			break;
1674 		case SVC_DENIED:
1675 			goto auth_err;
1676 		case SVC_DROP:
1677 			goto drop;
1678 		}
1679 		break;
1680 	default:
1681 		if (rqstp->rq_proc != 0)
1682 			goto auth_err;
1683 		rqstp->rq_auth_stat = rpc_autherr_rejectedcred;
1684 		goto auth_err;
1685 	}
1686 
1687 	/* now act upon the command: */
1688 	switch (gc->gc_proc) {
1689 	case RPC_GSS_PROC_DESTROY:
1690 		if (!svcauth_gss_encode_verf(rqstp, rsci->mechctx, gc->gc_seq))
1691 			goto auth_err;
1692 		if (!svcxdr_set_accept_stat(rqstp))
1693 			goto auth_err;
1694 		/* Delete the entry from the cache_list and call cache_put */
1695 		sunrpc_cache_unhash(sn->rsc_cache, &rsci->h);
1696 		goto complete;
1697 	case RPC_GSS_PROC_DATA:
1698 		rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
1699 		if (!svcauth_gss_encode_verf(rqstp, rsci->mechctx, gc->gc_seq))
1700 			goto auth_err;
1701 		if (!svcxdr_set_accept_stat(rqstp))
1702 			goto auth_err;
1703 		svcdata->gsd_databody_offset = xdr_stream_pos(&rqstp->rq_res_stream);
1704 		rqstp->rq_cred = rsci->cred;
1705 		get_group_info(rsci->cred.cr_group_info);
1706 		rqstp->rq_auth_stat = rpc_autherr_badcred;
1707 		switch (gc->gc_svc) {
1708 		case RPC_GSS_SVC_NONE:
1709 			break;
1710 		case RPC_GSS_SVC_INTEGRITY:
1711 			/* placeholders for body length and seq. number: */
1712 			xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
1713 			if (svcauth_gss_unwrap_integ(rqstp, gc->gc_seq,
1714 						     rsci->mechctx))
1715 				goto garbage_args;
1716 			svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE);
1717 			break;
1718 		case RPC_GSS_SVC_PRIVACY:
1719 			/* placeholders for body length and seq. number: */
1720 			xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
1721 			if (svcauth_gss_unwrap_priv(rqstp, gc->gc_seq,
1722 						    rsci->mechctx))
1723 				goto garbage_args;
1724 			svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE * 2);
1725 			break;
1726 		default:
1727 			goto auth_err;
1728 		}
1729 		svcdata->rsci = rsci;
1730 		cache_get(&rsci->h);
1731 		rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
1732 					rsci->mechctx->mech_type,
1733 					GSS_C_QOP_DEFAULT,
1734 					gc->gc_svc);
1735 		ret = SVC_OK;
1736 		trace_rpcgss_svc_authenticate(rqstp, gc);
1737 		goto out;
1738 	}
1739 garbage_args:
1740 	ret = SVC_GARBAGE;
1741 	goto out;
1742 auth_err:
1743 	xdr_truncate_encode(&rqstp->rq_res_stream, XDR_UNIT * 2);
1744 	ret = SVC_DENIED;
1745 	goto out;
1746 complete:
1747 	ret = SVC_COMPLETE;
1748 	goto out;
1749 drop:
1750 	ret = SVC_CLOSE;
1751 out:
1752 	if (rsci)
1753 		cache_put(&rsci->h, sn->rsc_cache);
1754 	return ret;
1755 }
1756 
1757 static u32
1758 svcauth_gss_prepare_to_wrap(struct svc_rqst *rqstp, struct gss_svc_data *gsd)
1759 {
1760 	u32 offset;
1761 
1762 	/* Release can be called twice, but we only wrap once. */
1763 	offset = gsd->gsd_databody_offset;
1764 	gsd->gsd_databody_offset = 0;
1765 
1766 	/* AUTH_ERROR replies are not wrapped. */
1767 	if (rqstp->rq_auth_stat != rpc_auth_ok)
1768 		return 0;
1769 
1770 	/* Also don't wrap if the accept_stat is nonzero: */
1771 	if (*rqstp->rq_accept_statp != rpc_success)
1772 		return 0;
1773 
1774 	return offset;
1775 }
1776 
1777 /*
1778  * RFC 2203, Section 5.3.2.2
1779  *
1780  *	struct rpc_gss_integ_data {
1781  *		opaque databody_integ<>;
1782  *		opaque checksum<>;
1783  *	};
1784  *
1785  *	struct rpc_gss_data_t {
1786  *		unsigned int seq_num;
1787  *		proc_req_arg_t arg;
1788  *	};
1789  *
1790  * The RPC Reply message has already been XDR-encoded. rq_res_stream
1791  * is now positioned so that the checksum can be written just past
1792  * the RPC Reply message.
1793  */
1794 static int svcauth_gss_wrap_integ(struct svc_rqst *rqstp)
1795 {
1796 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
1797 	struct xdr_stream *xdr = &rqstp->rq_res_stream;
1798 	struct rpc_gss_wire_cred *gc = &gsd->clcred;
1799 	struct xdr_buf *buf = xdr->buf;
1800 	struct xdr_buf databody_integ;
1801 	struct xdr_netobj checksum;
1802 	u32 offset, maj_stat;
1803 
1804 	offset = svcauth_gss_prepare_to_wrap(rqstp, gsd);
1805 	if (!offset)
1806 		goto out;
1807 
1808 	if (xdr_buf_subsegment(buf, &databody_integ, offset + XDR_UNIT,
1809 			       buf->len - offset - XDR_UNIT))
1810 		goto wrap_failed;
1811 	/* Buffer space for these has already been reserved in
1812 	 * svcauth_gss_accept(). */
1813 	if (xdr_encode_word(buf, offset, databody_integ.len))
1814 		goto wrap_failed;
1815 	if (xdr_encode_word(buf, offset + XDR_UNIT, gc->gc_seq))
1816 		goto wrap_failed;
1817 
1818 	checksum.data = gsd->gsd_scratch;
1819 	maj_stat = gss_get_mic(gsd->rsci->mechctx, &databody_integ, &checksum);
1820 	if (maj_stat != GSS_S_COMPLETE)
1821 		goto bad_mic;
1822 
1823 	if (xdr_stream_encode_opaque(xdr, checksum.data, checksum.len) < 0)
1824 		goto wrap_failed;
1825 	xdr_commit_encode(xdr);
1826 
1827 out:
1828 	return 0;
1829 
1830 bad_mic:
1831 	trace_rpcgss_svc_get_mic(rqstp, maj_stat);
1832 	return -EINVAL;
1833 wrap_failed:
1834 	trace_rpcgss_svc_wrap_failed(rqstp);
1835 	return -EINVAL;
1836 }
1837 
1838 /*
1839  * RFC 2203, Section 5.3.2.3
1840  *
1841  *	struct rpc_gss_priv_data {
1842  *		opaque databody_priv<>
1843  *	};
1844  *
1845  *	struct rpc_gss_data_t {
1846  *		unsigned int seq_num;
1847  *		proc_req_arg_t arg;
1848  *	};
1849  *
1850  * gss_wrap() expands the size of the RPC message payload in the
1851  * response buffer. The main purpose of svcauth_gss_wrap_priv()
1852  * is to ensure there is adequate space in the response buffer to
1853  * avoid overflow during the wrap.
1854  */
1855 static int svcauth_gss_wrap_priv(struct svc_rqst *rqstp)
1856 {
1857 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
1858 	struct rpc_gss_wire_cred *gc = &gsd->clcred;
1859 	struct xdr_buf *buf = &rqstp->rq_res;
1860 	struct kvec *head = buf->head;
1861 	struct kvec *tail = buf->tail;
1862 	u32 offset, pad, maj_stat;
1863 	__be32 *p;
1864 
1865 	offset = svcauth_gss_prepare_to_wrap(rqstp, gsd);
1866 	if (!offset)
1867 		return 0;
1868 
1869 	/*
1870 	 * Buffer space for this field has already been reserved
1871 	 * in svcauth_gss_accept(). Note that the GSS sequence
1872 	 * number is encrypted along with the RPC reply payload.
1873 	 */
1874 	if (xdr_encode_word(buf, offset + XDR_UNIT, gc->gc_seq))
1875 		goto wrap_failed;
1876 
1877 	/*
1878 	 * If there is currently tail data, make sure there is
1879 	 * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
1880 	 * the page, and move the current tail data such that
1881 	 * there is RPC_MAX_AUTH_SIZE slack space available in
1882 	 * both the head and tail.
1883 	 */
1884 	if (tail->iov_base) {
1885 		if (tail->iov_base >= head->iov_base + PAGE_SIZE)
1886 			goto wrap_failed;
1887 		if (tail->iov_base < head->iov_base)
1888 			goto wrap_failed;
1889 		if (tail->iov_len + head->iov_len
1890 				+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1891 			goto wrap_failed;
1892 		memmove(tail->iov_base + RPC_MAX_AUTH_SIZE, tail->iov_base,
1893 			tail->iov_len);
1894 		tail->iov_base += RPC_MAX_AUTH_SIZE;
1895 	}
1896 	/*
1897 	 * If there is no current tail data, make sure there is
1898 	 * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
1899 	 * allotted page, and set up tail information such that there
1900 	 * is RPC_MAX_AUTH_SIZE slack space available in both the
1901 	 * head and tail.
1902 	 */
1903 	if (!tail->iov_base) {
1904 		if (head->iov_len + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
1905 			goto wrap_failed;
1906 		tail->iov_base = head->iov_base
1907 			+ head->iov_len + RPC_MAX_AUTH_SIZE;
1908 		tail->iov_len = 0;
1909 	}
1910 
1911 	maj_stat = gss_wrap(gsd->rsci->mechctx, offset + XDR_UNIT, buf,
1912 			    buf->pages);
1913 	if (maj_stat != GSS_S_COMPLETE)
1914 		goto bad_wrap;
1915 
1916 	/* Wrapping can change the size of databody_priv. */
1917 	if (xdr_encode_word(buf, offset, buf->len - offset - XDR_UNIT))
1918 		goto wrap_failed;
1919 	pad = xdr_pad_size(buf->len - offset - XDR_UNIT);
1920 	p = (__be32 *)(tail->iov_base + tail->iov_len);
1921 	memset(p, 0, pad);
1922 	tail->iov_len += pad;
1923 	buf->len += pad;
1924 
1925 	return 0;
1926 wrap_failed:
1927 	trace_rpcgss_svc_wrap_failed(rqstp);
1928 	return -EINVAL;
1929 bad_wrap:
1930 	trace_rpcgss_svc_wrap(rqstp, maj_stat);
1931 	return -ENOMEM;
1932 }
1933 
1934 /**
1935  * svcauth_gss_release - Wrap payload and release resources
1936  * @rqstp: RPC transaction context
1937  *
1938  * Return values:
1939  *    %0: the Reply is ready to be sent
1940  *    %-ENOMEM: failed to allocate memory
1941  *    %-EINVAL: encoding error
1942  */
1943 static int
1944 svcauth_gss_release(struct svc_rqst *rqstp)
1945 {
1946 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
1947 	struct gss_svc_data *gsd = rqstp->rq_auth_data;
1948 	struct rpc_gss_wire_cred *gc;
1949 	int stat;
1950 
1951 	if (!gsd)
1952 		goto out;
1953 	gc = &gsd->clcred;
1954 	if (gc->gc_proc != RPC_GSS_PROC_DATA)
1955 		goto out;
1956 
1957 	switch (gc->gc_svc) {
1958 	case RPC_GSS_SVC_NONE:
1959 		break;
1960 	case RPC_GSS_SVC_INTEGRITY:
1961 		stat = svcauth_gss_wrap_integ(rqstp);
1962 		if (stat)
1963 			goto out_err;
1964 		break;
1965 	case RPC_GSS_SVC_PRIVACY:
1966 		stat = svcauth_gss_wrap_priv(rqstp);
1967 		if (stat)
1968 			goto out_err;
1969 		break;
1970 	/*
1971 	 * For any other gc_svc value, svcauth_gss_accept() already set
1972 	 * the auth_error appropriately; just fall through:
1973 	 */
1974 	}
1975 
1976 out:
1977 	stat = 0;
1978 out_err:
1979 	if (rqstp->rq_client)
1980 		auth_domain_put(rqstp->rq_client);
1981 	rqstp->rq_client = NULL;
1982 	if (rqstp->rq_gssclient)
1983 		auth_domain_put(rqstp->rq_gssclient);
1984 	rqstp->rq_gssclient = NULL;
1985 	if (rqstp->rq_cred.cr_group_info)
1986 		put_group_info(rqstp->rq_cred.cr_group_info);
1987 	rqstp->rq_cred.cr_group_info = NULL;
1988 	if (gsd && gsd->rsci) {
1989 		cache_put(&gsd->rsci->h, sn->rsc_cache);
1990 		gsd->rsci = NULL;
1991 	}
1992 	return stat;
1993 }
1994 
1995 static void
1996 svcauth_gss_domain_release_rcu(struct rcu_head *head)
1997 {
1998 	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
1999 	struct gss_domain *gd = container_of(dom, struct gss_domain, h);
2000 
2001 	kfree(dom->name);
2002 	kfree(gd);
2003 }
2004 
2005 static void
2006 svcauth_gss_domain_release(struct auth_domain *dom)
2007 {
2008 	call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
2009 }
2010 
2011 static struct auth_ops svcauthops_gss = {
2012 	.name		= "rpcsec_gss",
2013 	.owner		= THIS_MODULE,
2014 	.flavour	= RPC_AUTH_GSS,
2015 	.accept		= svcauth_gss_accept,
2016 	.release	= svcauth_gss_release,
2017 	.domain_release = svcauth_gss_domain_release,
2018 	.set_client	= svcauth_gss_set_client,
2019 };
2020 
2021 static int rsi_cache_create_net(struct net *net)
2022 {
2023 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2024 	struct cache_detail *cd;
2025 	int err;
2026 
2027 	cd = cache_create_net(&rsi_cache_template, net);
2028 	if (IS_ERR(cd))
2029 		return PTR_ERR(cd);
2030 	err = cache_register_net(cd, net);
2031 	if (err) {
2032 		cache_destroy_net(cd, net);
2033 		return err;
2034 	}
2035 	sn->rsi_cache = cd;
2036 	return 0;
2037 }
2038 
2039 static void rsi_cache_destroy_net(struct net *net)
2040 {
2041 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2042 	struct cache_detail *cd = sn->rsi_cache;
2043 
2044 	sn->rsi_cache = NULL;
2045 	cache_purge(cd);
2046 	cache_unregister_net(cd, net);
2047 	cache_destroy_net(cd, net);
2048 }
2049 
2050 static int rsc_cache_create_net(struct net *net)
2051 {
2052 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2053 	struct cache_detail *cd;
2054 	int err;
2055 
2056 	cd = cache_create_net(&rsc_cache_template, net);
2057 	if (IS_ERR(cd))
2058 		return PTR_ERR(cd);
2059 	err = cache_register_net(cd, net);
2060 	if (err) {
2061 		cache_destroy_net(cd, net);
2062 		return err;
2063 	}
2064 	sn->rsc_cache = cd;
2065 	return 0;
2066 }
2067 
2068 static void rsc_cache_destroy_net(struct net *net)
2069 {
2070 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
2071 	struct cache_detail *cd = sn->rsc_cache;
2072 
2073 	sn->rsc_cache = NULL;
2074 	cache_purge(cd);
2075 	cache_unregister_net(cd, net);
2076 	cache_destroy_net(cd, net);
2077 }
2078 
2079 int
2080 gss_svc_init_net(struct net *net)
2081 {
2082 	int rv;
2083 
2084 	rv = rsc_cache_create_net(net);
2085 	if (rv)
2086 		return rv;
2087 	rv = rsi_cache_create_net(net);
2088 	if (rv)
2089 		goto out1;
2090 	rv = create_use_gss_proxy_proc_entry(net);
2091 	if (rv)
2092 		goto out2;
2093 
2094 	rv = create_krb5_enctypes_proc_entry(net);
2095 	if (rv)
2096 		goto out3;
2097 
2098 	return 0;
2099 
2100 out3:
2101 	destroy_use_gss_proxy_proc_entry(net);
2102 out2:
2103 	rsi_cache_destroy_net(net);
2104 out1:
2105 	rsc_cache_destroy_net(net);
2106 	return rv;
2107 }
2108 
2109 void
2110 gss_svc_shutdown_net(struct net *net)
2111 {
2112 	destroy_krb5_enctypes_proc_entry(net);
2113 	destroy_use_gss_proxy_proc_entry(net);
2114 	rsi_cache_destroy_net(net);
2115 	rsc_cache_destroy_net(net);
2116 }
2117 
2118 int
2119 gss_svc_init(void)
2120 {
2121 	return svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
2122 }
2123 
2124 void
2125 gss_svc_shutdown(void)
2126 {
2127 	svc_auth_unregister(RPC_AUTH_GSS);
2128 }
2129