xref: /openbmc/linux/net/sunrpc/auth_gss/auth_gss.c (revision adb57164)
1 // SPDX-License-Identifier: BSD-3-Clause
2 /*
3  * linux/net/sunrpc/auth_gss/auth_gss.c
4  *
5  * RPCSEC_GSS client authentication.
6  *
7  *  Copyright (c) 2000 The Regents of the University of Michigan.
8  *  All rights reserved.
9  *
10  *  Dug Song       <dugsong@monkey.org>
11  *  Andy Adamson   <andros@umich.edu>
12  */
13 
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/sunrpc/clnt.h>
21 #include <linux/sunrpc/auth.h>
22 #include <linux/sunrpc/auth_gss.h>
23 #include <linux/sunrpc/gss_krb5.h>
24 #include <linux/sunrpc/svcauth_gss.h>
25 #include <linux/sunrpc/gss_err.h>
26 #include <linux/workqueue.h>
27 #include <linux/sunrpc/rpc_pipe_fs.h>
28 #include <linux/sunrpc/gss_api.h>
29 #include <linux/uaccess.h>
30 #include <linux/hashtable.h>
31 
32 #include "../netns.h"
33 
34 #include <trace/events/rpcgss.h>
35 
36 static const struct rpc_authops authgss_ops;
37 
38 static const struct rpc_credops gss_credops;
39 static const struct rpc_credops gss_nullops;
40 
41 #define GSS_RETRY_EXPIRED 5
42 static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
43 
44 #define GSS_KEY_EXPIRE_TIMEO 240
45 static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;
46 
47 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
48 # define RPCDBG_FACILITY	RPCDBG_AUTH
49 #endif
50 
51 #define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
52 /* length of a krb5 verifier (48), plus data added before arguments when
53  * using integrity (two 4-byte integers): */
54 #define GSS_VERF_SLACK		100
55 
56 static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
57 static DEFINE_SPINLOCK(gss_auth_hash_lock);
58 
59 struct gss_pipe {
60 	struct rpc_pipe_dir_object pdo;
61 	struct rpc_pipe *pipe;
62 	struct rpc_clnt *clnt;
63 	const char *name;
64 	struct kref kref;
65 };
66 
67 struct gss_auth {
68 	struct kref kref;
69 	struct hlist_node hash;
70 	struct rpc_auth rpc_auth;
71 	struct gss_api_mech *mech;
72 	enum rpc_gss_svc service;
73 	struct rpc_clnt *client;
74 	struct net *net;
75 	/*
76 	 * There are two upcall pipes; dentry[1], named "gssd", is used
77 	 * for the new text-based upcall; dentry[0] is named after the
78 	 * mechanism (for example, "krb5") and exists for
79 	 * backwards-compatibility with older gssd's.
80 	 */
81 	struct gss_pipe *gss_pipe[2];
82 	const char *target_name;
83 };
84 
85 /* pipe_version >= 0 if and only if someone has a pipe open. */
86 static DEFINE_SPINLOCK(pipe_version_lock);
87 static struct rpc_wait_queue pipe_version_rpc_waitqueue;
88 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
89 static void gss_put_auth(struct gss_auth *gss_auth);
90 
91 static void gss_free_ctx(struct gss_cl_ctx *);
92 static const struct rpc_pipe_ops gss_upcall_ops_v0;
93 static const struct rpc_pipe_ops gss_upcall_ops_v1;
94 
95 static inline struct gss_cl_ctx *
96 gss_get_ctx(struct gss_cl_ctx *ctx)
97 {
98 	refcount_inc(&ctx->count);
99 	return ctx;
100 }
101 
102 static inline void
103 gss_put_ctx(struct gss_cl_ctx *ctx)
104 {
105 	if (refcount_dec_and_test(&ctx->count))
106 		gss_free_ctx(ctx);
107 }
108 
109 /* gss_cred_set_ctx:
110  * called by gss_upcall_callback and gss_create_upcall in order
111  * to set the gss context. The actual exchange of an old context
112  * and a new one is protected by the pipe->lock.
113  */
114 static void
115 gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
116 {
117 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
118 
119 	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
120 		return;
121 	gss_get_ctx(ctx);
122 	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
123 	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
124 	smp_mb__before_atomic();
125 	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
126 }
127 
128 static const void *
129 simple_get_bytes(const void *p, const void *end, void *res, size_t len)
130 {
131 	const void *q = (const void *)((const char *)p + len);
132 	if (unlikely(q > end || q < p))
133 		return ERR_PTR(-EFAULT);
134 	memcpy(res, p, len);
135 	return q;
136 }
137 
138 static inline const void *
139 simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
140 {
141 	const void *q;
142 	unsigned int len;
143 
144 	p = simple_get_bytes(p, end, &len, sizeof(len));
145 	if (IS_ERR(p))
146 		return p;
147 	q = (const void *)((const char *)p + len);
148 	if (unlikely(q > end || q < p))
149 		return ERR_PTR(-EFAULT);
150 	dest->data = kmemdup(p, len, GFP_NOFS);
151 	if (unlikely(dest->data == NULL))
152 		return ERR_PTR(-ENOMEM);
153 	dest->len = len;
154 	return q;
155 }
156 
157 static struct gss_cl_ctx *
158 gss_cred_get_ctx(struct rpc_cred *cred)
159 {
160 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
161 	struct gss_cl_ctx *ctx = NULL;
162 
163 	rcu_read_lock();
164 	ctx = rcu_dereference(gss_cred->gc_ctx);
165 	if (ctx)
166 		gss_get_ctx(ctx);
167 	rcu_read_unlock();
168 	return ctx;
169 }
170 
171 static struct gss_cl_ctx *
172 gss_alloc_context(void)
173 {
174 	struct gss_cl_ctx *ctx;
175 
176 	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
177 	if (ctx != NULL) {
178 		ctx->gc_proc = RPC_GSS_PROC_DATA;
179 		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
180 		spin_lock_init(&ctx->gc_seq_lock);
181 		refcount_set(&ctx->count, 1);
182 	}
183 	return ctx;
184 }
185 
186 #define GSSD_MIN_TIMEOUT (60 * 60)
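/*
 * Parse the context portion of a downcall from gssd.  The layout, as
 * consumed below, is:
 *
 *	unsigned int	remaining lifetime of the context, in seconds
 *			(0 is replaced by GSSD_MIN_TIMEOUT)
 *	unsigned int	sequence window size; 0 signals an error, in
 *			which case an errno follows and parsing stops
 *	netobj		opaque wire context handle
 *	unsigned int	length of the exported security context
 *	opaque		mechanism-specific context blob, handed to
 *			gss_import_sec_context()
 *	netobj		acceptor name (optional trailing data)
 */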
187 static const void *
188 gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
189 {
190 	const void *q;
191 	unsigned int seclen;
192 	unsigned int timeout;
193 	unsigned long now = jiffies;
194 	u32 window_size;
195 	int ret;
196 
197 	/* First unsigned int gives the remaining lifetime in seconds of the
198 	 * credential - e.g. the remaining TGT lifetime for Kerberos or
199 	 * the -t value passed to GSSD.
200 	 */
201 	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
202 	if (IS_ERR(p))
203 		goto err;
204 	if (timeout == 0)
205 		timeout = GSSD_MIN_TIMEOUT;
206 	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
207 	/* Sequence number window. Determines the maximum number of
208 	 * simultaneous requests
209 	 */
210 	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
211 	if (IS_ERR(p))
212 		goto err;
213 	ctx->gc_win = window_size;
214 	/* gssd signals an error by passing ctx->gc_win = 0: */
215 	if (ctx->gc_win == 0) {
216 		/*
217 		 * in which case, p points to an error code. Anything other
218 		 * than -EKEYEXPIRED gets converted to -EACCES.
219 		 */
220 		p = simple_get_bytes(p, end, &ret, sizeof(ret));
221 		if (!IS_ERR(p))
222 			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
223 						    ERR_PTR(-EACCES);
224 		goto err;
225 	}
226 	/* copy the opaque wire context */
227 	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
228 	if (IS_ERR(p))
229 		goto err;
230 	/* import the opaque security context */
231 	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
232 	if (IS_ERR(p))
233 		goto err;
234 	q = (const void *)((const char *)p + seclen);
235 	if (unlikely(q > end || q < p)) {
236 		p = ERR_PTR(-EFAULT);
237 		goto err;
238 	}
239 	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
240 	if (ret < 0) {
241 		trace_rpcgss_import_ctx(ret);
242 		p = ERR_PTR(ret);
243 		goto err;
244 	}
245 
246 	/* is there any trailing data? */
247 	if (q == end) {
248 		p = q;
249 		goto done;
250 	}
251 
252 	/* pull in acceptor name (if there is one) */
253 	p = simple_get_netobj(q, end, &ctx->gc_acceptor);
254 	if (IS_ERR(p))
255 		goto err;
256 done:
257 	trace_rpcgss_context(ctx->gc_expiry, now, timeout,
258 			     ctx->gc_acceptor.len, ctx->gc_acceptor.data);
259 err:
260 	return p;
261 }
262 
263 /* XXX: Need some documentation about why UPCALL_BUF_LEN is so small.
264  *	Is user space expecting no more than UPCALL_BUF_LEN bytes?
265  *	Note that there are now _two_ NI_MAXHOST sized data items
266  *	being passed in this string.
267  */
268 #define UPCALL_BUF_LEN	256
269 
270 struct gss_upcall_msg {
271 	refcount_t count;
272 	kuid_t	uid;
273 	const char *service_name;
274 	struct rpc_pipe_msg msg;
275 	struct list_head list;
276 	struct gss_auth *auth;
277 	struct rpc_pipe *pipe;
278 	struct rpc_wait_queue rpc_waitqueue;
279 	wait_queue_head_t waitqueue;
280 	struct gss_cl_ctx *ctx;
281 	char databuf[UPCALL_BUF_LEN];
282 };
283 
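/*
 * Take a reference on whichever upcall pipe gssd currently has open.
 * Returns the pipe version (0 or 1), or -EAGAIN if no pipe is open yet.
 */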
284 static int get_pipe_version(struct net *net)
285 {
286 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
287 	int ret;
288 
289 	spin_lock(&pipe_version_lock);
290 	if (sn->pipe_version >= 0) {
291 		atomic_inc(&sn->pipe_users);
292 		ret = sn->pipe_version;
293 	} else
294 		ret = -EAGAIN;
295 	spin_unlock(&pipe_version_lock);
296 	return ret;
297 }
298 
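/* Drop a pipe reference; the last user marks the version as unset again. */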
299 static void put_pipe_version(struct net *net)
300 {
301 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
302 
303 	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
304 		sn->pipe_version = -1;
305 		spin_unlock(&pipe_version_lock);
306 	}
307 }
308 
309 static void
310 gss_release_msg(struct gss_upcall_msg *gss_msg)
311 {
312 	struct net *net = gss_msg->auth->net;
313 	if (!refcount_dec_and_test(&gss_msg->count))
314 		return;
315 	put_pipe_version(net);
316 	BUG_ON(!list_empty(&gss_msg->list));
317 	if (gss_msg->ctx != NULL)
318 		gss_put_ctx(gss_msg->ctx);
319 	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
320 	gss_put_auth(gss_msg->auth);
321 	kfree_const(gss_msg->service_name);
322 	kfree(gss_msg);
323 }
324 
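/*
 * Look up a pending upcall matching @uid (and, if @auth is non-NULL, the
 * same GSS service).  Returns a referenced message or NULL.  Caller must
 * hold pipe->lock.
 */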
325 static struct gss_upcall_msg *
326 __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
327 {
328 	struct gss_upcall_msg *pos;
329 	list_for_each_entry(pos, &pipe->in_downcall, list) {
330 		if (!uid_eq(pos->uid, uid))
331 			continue;
332 		if (auth && pos->auth->service != auth->service)
333 			continue;
334 		refcount_inc(&pos->count);
335 		return pos;
336 	}
337 	return NULL;
338 }
339 
340 /* Try to add an upcall to the pipefs queue.
341  * If an upcall owned by our uid already exists, then we return a reference
342  * to that upcall instead of adding the new upcall.
343  */
344 static inline struct gss_upcall_msg *
345 gss_add_msg(struct gss_upcall_msg *gss_msg)
346 {
347 	struct rpc_pipe *pipe = gss_msg->pipe;
348 	struct gss_upcall_msg *old;
349 
350 	spin_lock(&pipe->lock);
351 	old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
352 	if (old == NULL) {
353 		refcount_inc(&gss_msg->count);
354 		list_add(&gss_msg->list, &pipe->in_downcall);
355 	} else
356 		gss_msg = old;
357 	spin_unlock(&pipe->lock);
358 	return gss_msg;
359 }
360 
361 static void
362 __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
363 {
364 	list_del_init(&gss_msg->list);
365 	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
366 	wake_up_all(&gss_msg->waitqueue);
367 	refcount_dec(&gss_msg->count);
368 }
369 
370 static void
371 gss_unhash_msg(struct gss_upcall_msg *gss_msg)
372 {
373 	struct rpc_pipe *pipe = gss_msg->pipe;
374 
375 	if (list_empty(&gss_msg->list))
376 		return;
377 	spin_lock(&pipe->lock);
378 	if (!list_empty(&gss_msg->list))
379 		__gss_unhash_msg(gss_msg);
380 	spin_unlock(&pipe->lock);
381 }
382 
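/*
 * Record the outcome of a finished upcall in the cred: install the new
 * context on success, mark the cred negative on -EKEYEXPIRED, then wake
 * any tasks waiting on this upcall.  Called with pipe->lock held.
 */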
383 static void
384 gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
385 {
386 	switch (gss_msg->msg.errno) {
387 	case 0:
388 		if (gss_msg->ctx == NULL)
389 			break;
390 		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
391 		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
392 		break;
393 	case -EKEYEXPIRED:
394 		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
395 	}
396 	gss_cred->gc_upcall_timestamp = jiffies;
397 	gss_cred->gc_upcall = NULL;
398 	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
399 }
400 
401 static void
402 gss_upcall_callback(struct rpc_task *task)
403 {
404 	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
405 			struct gss_cred, gc_base);
406 	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
407 	struct rpc_pipe *pipe = gss_msg->pipe;
408 
409 	spin_lock(&pipe->lock);
410 	gss_handle_downcall_result(gss_cred, gss_msg);
411 	spin_unlock(&pipe->lock);
412 	task->tk_status = gss_msg->msg.errno;
413 	gss_release_msg(gss_msg);
414 }
415 
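/*
 * Legacy (v0) upcall format: the message body is just the raw uid_t of
 * the user who needs a context, translated into the reader's user
 * namespace.
 */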
416 static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg,
417 			      const struct cred *cred)
418 {
419 	struct user_namespace *userns = cred->user_ns;
420 
421 	uid_t uid = from_kuid_munged(userns, gss_msg->uid);
422 	memcpy(gss_msg->databuf, &uid, sizeof(uid));
423 	gss_msg->msg.data = gss_msg->databuf;
424 	gss_msg->msg.len = sizeof(uid);
425 
426 	BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
427 }
428 
429 static ssize_t
430 gss_v0_upcall(struct file *file, struct rpc_pipe_msg *msg,
431 		char __user *buf, size_t buflen)
432 {
433 	struct gss_upcall_msg *gss_msg = container_of(msg,
434 						      struct gss_upcall_msg,
435 						      msg);
436 	if (msg->copied == 0)
437 		gss_encode_v0_msg(gss_msg, file->f_cred);
438 	return rpc_pipe_generic_upcall(file, msg, buf, buflen);
439 }
440 
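/*
 * Text-based (v1) upcall format: one line of space-separated "key=value"
 * pairs terminated by a newline, e.g. (illustrative values only):
 *
 *	mech=krb5 uid=1000 target=nfs@server service=nfs srchost=client enctypes=18,17\n
 *
 * mech= and uid= are always present; target=, service=, srchost= and
 * enctypes= are added only when the corresponding data is available.
 */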
441 static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
442 				const char *service_name,
443 				const char *target_name,
444 				const struct cred *cred)
445 {
446 	struct user_namespace *userns = cred->user_ns;
447 	struct gss_api_mech *mech = gss_msg->auth->mech;
448 	char *p = gss_msg->databuf;
449 	size_t buflen = sizeof(gss_msg->databuf);
450 	int len;
451 
452 	len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
453 			from_kuid_munged(userns, gss_msg->uid));
454 	buflen -= len;
455 	p += len;
456 	gss_msg->msg.len = len;
457 
458 	/*
459 	 * target= is a full service principal that names the remote
460 	 * identity that we are authenticating to.
461 	 */
462 	if (target_name) {
463 		len = scnprintf(p, buflen, " target=%s", target_name);
464 		buflen -= len;
465 		p += len;
466 		gss_msg->msg.len += len;
467 	}
468 
469 	/*
470 	 * gssd uses service= and srchost= to select a matching key from
471 	 * the system's keytab to use as the source principal.
472 	 *
473 	 * service= is the service name part of the source principal,
474 	 * or "*" (meaning choose any).
475 	 *
476 	 * srchost= is the hostname part of the source principal. When
477 	 * not provided, gssd uses the local hostname.
478 	 */
479 	if (service_name) {
480 		char *c = strchr(service_name, '@');
481 
482 		if (!c)
483 			len = scnprintf(p, buflen, " service=%s",
484 					service_name);
485 		else
486 			len = scnprintf(p, buflen,
487 					" service=%.*s srchost=%s",
488 					(int)(c - service_name),
489 					service_name, c + 1);
490 		buflen -= len;
491 		p += len;
492 		gss_msg->msg.len += len;
493 	}
494 
495 	if (mech->gm_upcall_enctypes) {
496 		len = scnprintf(p, buflen, " enctypes=%s",
497 				mech->gm_upcall_enctypes);
498 		buflen -= len;
499 		p += len;
500 		gss_msg->msg.len += len;
501 	}
502 	trace_rpcgss_upcall_msg(gss_msg->databuf);
503 	len = scnprintf(p, buflen, "\n");
504 	if (len == 0)
505 		goto out_overflow;
506 	gss_msg->msg.len += len;
507 	gss_msg->msg.data = gss_msg->databuf;
508 	return 0;
509 out_overflow:
510 	WARN_ON_ONCE(1);
511 	return -ENOMEM;
512 }
513 
514 static ssize_t
515 gss_v1_upcall(struct file *file, struct rpc_pipe_msg *msg,
516 		char __user *buf, size_t buflen)
517 {
518 	struct gss_upcall_msg *gss_msg = container_of(msg,
519 						      struct gss_upcall_msg,
520 						      msg);
521 	int err;
522 	if (msg->copied == 0) {
523 		err = gss_encode_v1_msg(gss_msg,
524 					gss_msg->service_name,
525 					gss_msg->auth->target_name,
526 					file->f_cred);
527 		if (err)
528 			return err;
529 	}
530 	return rpc_pipe_generic_upcall(file, msg, buf, buflen);
531 }
532 
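/*
 * Allocate and initialize an upcall message for @uid, bound to whichever
 * upcall pipe (v0 or v1) gssd currently has open.  Takes a reference on
 * @gss_auth; fails with -EAGAIN if no pipe is open yet.
 */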
533 static struct gss_upcall_msg *
534 gss_alloc_msg(struct gss_auth *gss_auth,
535 		kuid_t uid, const char *service_name)
536 {
537 	struct gss_upcall_msg *gss_msg;
538 	int vers;
539 	int err = -ENOMEM;
540 
541 	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
542 	if (gss_msg == NULL)
543 		goto err;
544 	vers = get_pipe_version(gss_auth->net);
545 	err = vers;
546 	if (err < 0)
547 		goto err_free_msg;
548 	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
549 	INIT_LIST_HEAD(&gss_msg->list);
550 	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
551 	init_waitqueue_head(&gss_msg->waitqueue);
552 	refcount_set(&gss_msg->count, 1);
553 	gss_msg->uid = uid;
554 	gss_msg->auth = gss_auth;
555 	kref_get(&gss_auth->kref);
556 	if (service_name) {
557 		gss_msg->service_name = kstrdup_const(service_name, GFP_NOFS);
558 		if (!gss_msg->service_name) {
559 			err = -ENOMEM;
560 			goto err_put_pipe_version;
561 		}
562 	}
563 	return gss_msg;
564 err_put_pipe_version:
565 	put_pipe_version(gss_auth->net);
566 err_free_msg:
567 	kfree(gss_msg);
568 err:
569 	return ERR_PTR(err);
570 }
571 
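/*
 * Queue an upcall for @cred.  If an upcall for the same uid (and service)
 * is already pending, the new message is dropped and a reference to the
 * existing one is returned instead.
 */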
572 static struct gss_upcall_msg *
573 gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
574 {
575 	struct gss_cred *gss_cred = container_of(cred,
576 			struct gss_cred, gc_base);
577 	struct gss_upcall_msg *gss_new, *gss_msg;
578 	kuid_t uid = cred->cr_cred->fsuid;
579 
580 	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
581 	if (IS_ERR(gss_new))
582 		return gss_new;
583 	gss_msg = gss_add_msg(gss_new);
584 	if (gss_msg == gss_new) {
585 		int res;
586 		refcount_inc(&gss_msg->count);
587 		res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
588 		if (res) {
589 			gss_unhash_msg(gss_new);
590 			refcount_dec(&gss_msg->count);
591 			gss_release_msg(gss_new);
592 			gss_msg = ERR_PTR(res);
593 		}
594 	} else
595 		gss_release_msg(gss_new);
596 	return gss_msg;
597 }
598 
599 static void warn_gssd(void)
600 {
601 	dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
602 }
603 
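/*
 * Asynchronous context refresh, called from an RPC task: the task is put
 * to sleep on the upcall's wait queue and resumed by gss_upcall_callback()
 * once gssd writes the downcall.  Returns -EAGAIN (and sleeps on the pipe
 * version queue) if gssd has not opened an upcall pipe yet.
 */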
604 static inline int
605 gss_refresh_upcall(struct rpc_task *task)
606 {
607 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
608 	struct gss_auth *gss_auth = container_of(cred->cr_auth,
609 			struct gss_auth, rpc_auth);
610 	struct gss_cred *gss_cred = container_of(cred,
611 			struct gss_cred, gc_base);
612 	struct gss_upcall_msg *gss_msg;
613 	struct rpc_pipe *pipe;
614 	int err = 0;
615 
616 	gss_msg = gss_setup_upcall(gss_auth, cred);
617 	if (PTR_ERR(gss_msg) == -EAGAIN) {
618 		/* XXX: warning on the first, under the assumption we
619 		 * shouldn't normally hit this case on a refresh. */
620 		warn_gssd();
621 		rpc_sleep_on_timeout(&pipe_version_rpc_waitqueue,
622 				task, NULL, jiffies + (15 * HZ));
623 		err = -EAGAIN;
624 		goto out;
625 	}
626 	if (IS_ERR(gss_msg)) {
627 		err = PTR_ERR(gss_msg);
628 		goto out;
629 	}
630 	pipe = gss_msg->pipe;
631 	spin_lock(&pipe->lock);
632 	if (gss_cred->gc_upcall != NULL)
633 		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
634 	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
635 		gss_cred->gc_upcall = gss_msg;
636 		/* gss_upcall_callback will release the reference to gss_upcall_msg */
637 		refcount_inc(&gss_msg->count);
638 		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
639 	} else {
640 		gss_handle_downcall_result(gss_cred, gss_msg);
641 		err = gss_msg->msg.errno;
642 	}
643 	spin_unlock(&pipe->lock);
644 	gss_release_msg(gss_msg);
645 out:
646 	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
647 					     cred->cr_cred->fsuid), err);
648 	return err;
649 }
650 
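/*
 * Synchronous variant used when a context is first set up: blocks
 * (killably) until gssd answers the upcall, the message errors out, or a
 * fatal signal arrives.
 */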
651 static inline int
652 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
653 {
654 	struct net *net = gss_auth->net;
655 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
656 	struct rpc_pipe *pipe;
657 	struct rpc_cred *cred = &gss_cred->gc_base;
658 	struct gss_upcall_msg *gss_msg;
659 	DEFINE_WAIT(wait);
660 	int err;
661 
662 retry:
663 	err = 0;
664 	/* if gssd is down, just skip upcalling altogether */
665 	if (!gssd_running(net)) {
666 		warn_gssd();
667 		err = -EACCES;
668 		goto out;
669 	}
670 	gss_msg = gss_setup_upcall(gss_auth, cred);
671 	if (PTR_ERR(gss_msg) == -EAGAIN) {
672 		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
673 				sn->pipe_version >= 0, 15 * HZ);
674 		if (sn->pipe_version < 0) {
675 			warn_gssd();
676 			err = -EACCES;
677 		}
678 		if (err < 0)
679 			goto out;
680 		goto retry;
681 	}
682 	if (IS_ERR(gss_msg)) {
683 		err = PTR_ERR(gss_msg);
684 		goto out;
685 	}
686 	pipe = gss_msg->pipe;
687 	for (;;) {
688 		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
689 		spin_lock(&pipe->lock);
690 		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
691 			break;
692 		}
693 		spin_unlock(&pipe->lock);
694 		if (fatal_signal_pending(current)) {
695 			err = -ERESTARTSYS;
696 			goto out_intr;
697 		}
698 		schedule();
699 	}
700 	if (gss_msg->ctx)
701 		gss_cred_set_ctx(cred, gss_msg->ctx);
702 	else
703 		err = gss_msg->msg.errno;
704 	spin_unlock(&pipe->lock);
705 out_intr:
706 	finish_wait(&gss_msg->waitqueue, &wait);
707 	gss_release_msg(gss_msg);
708 out:
709 	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
710 					     cred->cr_cred->fsuid), err);
711 	return err;
712 }
713 
714 #define MSG_BUF_MAXSIZE 1024
715 
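/*
 * Handle a downcall written by gssd.  The buffer starts with the uid the
 * reply is for, followed by the context data parsed by gss_fill_context().
 * On success the matching upcall is completed and mlen is returned.
 */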
716 static ssize_t
717 gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
718 {
719 	const void *p, *end;
720 	void *buf;
721 	struct gss_upcall_msg *gss_msg;
722 	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
723 	struct gss_cl_ctx *ctx;
724 	uid_t id;
725 	kuid_t uid;
726 	ssize_t err = -EFBIG;
727 
728 	if (mlen > MSG_BUF_MAXSIZE)
729 		goto out;
730 	err = -ENOMEM;
731 	buf = kmalloc(mlen, GFP_NOFS);
732 	if (!buf)
733 		goto out;
734 
735 	err = -EFAULT;
736 	if (copy_from_user(buf, src, mlen))
737 		goto err;
738 
739 	end = (const void *)((char *)buf + mlen);
740 	p = simple_get_bytes(buf, end, &id, sizeof(id));
741 	if (IS_ERR(p)) {
742 		err = PTR_ERR(p);
743 		goto err;
744 	}
745 
746 	uid = make_kuid(current_user_ns(), id);
747 	if (!uid_valid(uid)) {
748 		err = -EINVAL;
749 		goto err;
750 	}
751 
752 	err = -ENOMEM;
753 	ctx = gss_alloc_context();
754 	if (ctx == NULL)
755 		goto err;
756 
757 	err = -ENOENT;
758 	/* Find a matching upcall */
759 	spin_lock(&pipe->lock);
760 	gss_msg = __gss_find_upcall(pipe, uid, NULL);
761 	if (gss_msg == NULL) {
762 		spin_unlock(&pipe->lock);
763 		goto err_put_ctx;
764 	}
765 	list_del_init(&gss_msg->list);
766 	spin_unlock(&pipe->lock);
767 
768 	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
769 	if (IS_ERR(p)) {
770 		err = PTR_ERR(p);
771 		switch (err) {
772 		case -EACCES:
773 		case -EKEYEXPIRED:
774 			gss_msg->msg.errno = err;
775 			err = mlen;
776 			break;
777 		case -EFAULT:
778 		case -ENOMEM:
779 		case -EINVAL:
780 		case -ENOSYS:
781 			gss_msg->msg.errno = -EAGAIN;
782 			break;
783 		default:
784 			printk(KERN_CRIT "%s: bad return from "
785 				"gss_fill_context: %zd\n", __func__, err);
786 			gss_msg->msg.errno = -EIO;
787 		}
788 		goto err_release_msg;
789 	}
790 	gss_msg->ctx = gss_get_ctx(ctx);
791 	err = mlen;
792 
793 err_release_msg:
794 	spin_lock(&pipe->lock);
795 	__gss_unhash_msg(gss_msg);
796 	spin_unlock(&pipe->lock);
797 	gss_release_msg(gss_msg);
798 err_put_ctx:
799 	gss_put_ctx(ctx);
800 err:
801 	kfree(buf);
802 out:
803 	return err;
804 }
805 
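/*
 * The first open of either pipe pins the upcall protocol version for this
 * network namespace; opening a pipe of the other version then fails with
 * -EBUSY until all users are gone.
 */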
806 static int gss_pipe_open(struct inode *inode, int new_version)
807 {
808 	struct net *net = inode->i_sb->s_fs_info;
809 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
810 	int ret = 0;
811 
812 	spin_lock(&pipe_version_lock);
813 	if (sn->pipe_version < 0) {
814 		/* First open of any gss pipe determines the version: */
815 		sn->pipe_version = new_version;
816 		rpc_wake_up(&pipe_version_rpc_waitqueue);
817 		wake_up(&pipe_version_waitqueue);
818 	} else if (sn->pipe_version != new_version) {
819 		/* Trying to open a pipe of a different version */
820 		ret = -EBUSY;
821 		goto out;
822 	}
823 	atomic_inc(&sn->pipe_users);
824 out:
825 	spin_unlock(&pipe_version_lock);
826 	return ret;
827 
828 }
829 
830 static int gss_pipe_open_v0(struct inode *inode)
831 {
832 	return gss_pipe_open(inode, 0);
833 }
834 
835 static int gss_pipe_open_v1(struct inode *inode)
836 {
837 	return gss_pipe_open(inode, 1);
838 }
839 
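/*
 * Called on the final close of a pipe: upcalls still waiting for a
 * downcall are failed with -EPIPE, and the pipe version reference is
 * dropped.
 */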
840 static void
841 gss_pipe_release(struct inode *inode)
842 {
843 	struct net *net = inode->i_sb->s_fs_info;
844 	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
845 	struct gss_upcall_msg *gss_msg;
846 
847 restart:
848 	spin_lock(&pipe->lock);
849 	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {
850 
851 		if (!list_empty(&gss_msg->msg.list))
852 			continue;
853 		gss_msg->msg.errno = -EPIPE;
854 		refcount_inc(&gss_msg->count);
855 		__gss_unhash_msg(gss_msg);
856 		spin_unlock(&pipe->lock);
857 		gss_release_msg(gss_msg);
858 		goto restart;
859 	}
860 	spin_unlock(&pipe->lock);
861 
862 	put_pipe_version(net);
863 }
864 
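/*
 * pipefs ->destroy_msg callback: if the message errored out (for example
 * the pipe was closed, or the upcall timed out), unhash it and wake any
 * waiters before dropping the queue's reference.
 */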
865 static void
866 gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
867 {
868 	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
869 
870 	if (msg->errno < 0) {
871 		refcount_inc(&gss_msg->count);
872 		gss_unhash_msg(gss_msg);
873 		if (msg->errno == -ETIMEDOUT)
874 			warn_gssd();
875 		gss_release_msg(gss_msg);
876 	}
877 	gss_release_msg(gss_msg);
878 }
879 
880 static void gss_pipe_dentry_destroy(struct dentry *dir,
881 		struct rpc_pipe_dir_object *pdo)
882 {
883 	struct gss_pipe *gss_pipe = pdo->pdo_data;
884 	struct rpc_pipe *pipe = gss_pipe->pipe;
885 
886 	if (pipe->dentry != NULL) {
887 		rpc_unlink(pipe->dentry);
888 		pipe->dentry = NULL;
889 	}
890 }
891 
892 static int gss_pipe_dentry_create(struct dentry *dir,
893 		struct rpc_pipe_dir_object *pdo)
894 {
895 	struct gss_pipe *p = pdo->pdo_data;
896 	struct dentry *dentry;
897 
898 	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
899 	if (IS_ERR(dentry))
900 		return PTR_ERR(dentry);
901 	p->pipe->dentry = dentry;
902 	return 0;
903 }
904 
905 static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
906 	.create = gss_pipe_dentry_create,
907 	.destroy = gss_pipe_dentry_destroy,
908 };
909 
910 static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
911 		const char *name,
912 		const struct rpc_pipe_ops *upcall_ops)
913 {
914 	struct gss_pipe *p;
915 	int err = -ENOMEM;
916 
917 	p = kmalloc(sizeof(*p), GFP_KERNEL);
918 	if (p == NULL)
919 		goto err;
920 	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
921 	if (IS_ERR(p->pipe)) {
922 		err = PTR_ERR(p->pipe);
923 		goto err_free_gss_pipe;
924 	}
925 	p->name = name;
926 	p->clnt = clnt;
927 	kref_init(&p->kref);
928 	rpc_init_pipe_dir_object(&p->pdo,
929 			&gss_pipe_dir_object_ops,
930 			p);
931 	return p;
932 err_free_gss_pipe:
933 	kfree(p);
934 err:
935 	return ERR_PTR(err);
936 }
937 
938 struct gss_alloc_pdo {
939 	struct rpc_clnt *clnt;
940 	const char *name;
941 	const struct rpc_pipe_ops *upcall_ops;
942 };
943 
944 static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
945 {
946 	struct gss_pipe *gss_pipe;
947 	struct gss_alloc_pdo *args = data;
948 
949 	if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
950 		return 0;
951 	gss_pipe = container_of(pdo, struct gss_pipe, pdo);
952 	if (strcmp(gss_pipe->name, args->name) != 0)
953 		return 0;
954 	if (!kref_get_unless_zero(&gss_pipe->kref))
955 		return 0;
956 	return 1;
957 }
958 
959 static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
960 {
961 	struct gss_pipe *gss_pipe;
962 	struct gss_alloc_pdo *args = data;
963 
964 	gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
965 	if (!IS_ERR(gss_pipe))
966 		return &gss_pipe->pdo;
967 	return NULL;
968 }
969 
970 static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
971 		const char *name,
972 		const struct rpc_pipe_ops *upcall_ops)
973 {
974 	struct net *net = rpc_net_ns(clnt);
975 	struct rpc_pipe_dir_object *pdo;
976 	struct gss_alloc_pdo args = {
977 		.clnt = clnt,
978 		.name = name,
979 		.upcall_ops = upcall_ops,
980 	};
981 
982 	pdo = rpc_find_or_alloc_pipe_dir_object(net,
983 			&clnt->cl_pipedir_objects,
984 			gss_pipe_match_pdo,
985 			gss_pipe_alloc_pdo,
986 			&args);
987 	if (pdo != NULL)
988 		return container_of(pdo, struct gss_pipe, pdo);
989 	return ERR_PTR(-ENOMEM);
990 }
991 
992 static void __gss_pipe_free(struct gss_pipe *p)
993 {
994 	struct rpc_clnt *clnt = p->clnt;
995 	struct net *net = rpc_net_ns(clnt);
996 
997 	rpc_remove_pipe_dir_object(net,
998 			&clnt->cl_pipedir_objects,
999 			&p->pdo);
1000 	rpc_destroy_pipe_data(p->pipe);
1001 	kfree(p);
1002 }
1003 
1004 static void __gss_pipe_release(struct kref *kref)
1005 {
1006 	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);
1007 
1008 	__gss_pipe_free(p);
1009 }
1010 
1011 static void gss_pipe_free(struct gss_pipe *p)
1012 {
1013 	if (p != NULL)
1014 		kref_put(&p->kref, __gss_pipe_release);
1015 }
1016 
1017 /*
1018  * NOTE: we have the opportunity to use different
1019  * parameters based on the input flavor (which must be a pseudoflavor)
1020  */
1021 static struct gss_auth *
1022 gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1023 {
1024 	rpc_authflavor_t flavor = args->pseudoflavor;
1025 	struct gss_auth *gss_auth;
1026 	struct gss_pipe *gss_pipe;
1027 	struct rpc_auth *auth;
1028 	int err = -ENOMEM; /* XXX? */
1029 
1030 	if (!try_module_get(THIS_MODULE))
1031 		return ERR_PTR(err);
1032 	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
1033 		goto out_dec;
1034 	INIT_HLIST_NODE(&gss_auth->hash);
1035 	gss_auth->target_name = NULL;
1036 	if (args->target_name) {
1037 		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
1038 		if (gss_auth->target_name == NULL)
1039 			goto err_free;
1040 	}
1041 	gss_auth->client = clnt;
1042 	gss_auth->net = get_net(rpc_net_ns(clnt));
1043 	err = -EINVAL;
1044 	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
1045 	if (!gss_auth->mech)
1046 		goto err_put_net;
1047 	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
1048 	if (gss_auth->service == 0)
1049 		goto err_put_mech;
1050 	if (!gssd_running(gss_auth->net))
1051 		goto err_put_mech;
1052 	auth = &gss_auth->rpc_auth;
1053 	auth->au_cslack = GSS_CRED_SLACK >> 2;
1054 	auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
1055 	auth->au_verfsize = GSS_VERF_SLACK >> 2;
1056 	auth->au_ralign = GSS_VERF_SLACK >> 2;
1057 	auth->au_flags = 0;
1058 	auth->au_ops = &authgss_ops;
1059 	auth->au_flavor = flavor;
1060 	if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
1061 		auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
1062 	refcount_set(&auth->au_count, 1);
1063 	kref_init(&gss_auth->kref);
1064 
1065 	err = rpcauth_init_credcache(auth);
1066 	if (err)
1067 		goto err_put_mech;
1068 	/*
1069 	 * Note: if we created the old pipe first, then someone who
1070 	 * examined the directory at the right moment might conclude
1071 	 * that we supported only the old pipe.  So we instead create
1072 	 * the new pipe first.
1073 	 */
1074 	gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
1075 	if (IS_ERR(gss_pipe)) {
1076 		err = PTR_ERR(gss_pipe);
1077 		goto err_destroy_credcache;
1078 	}
1079 	gss_auth->gss_pipe[1] = gss_pipe;
1080 
1081 	gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
1082 			&gss_upcall_ops_v0);
1083 	if (IS_ERR(gss_pipe)) {
1084 		err = PTR_ERR(gss_pipe);
1085 		goto err_destroy_pipe_1;
1086 	}
1087 	gss_auth->gss_pipe[0] = gss_pipe;
1088 
1089 	return gss_auth;
1090 err_destroy_pipe_1:
1091 	gss_pipe_free(gss_auth->gss_pipe[1]);
1092 err_destroy_credcache:
1093 	rpcauth_destroy_credcache(auth);
1094 err_put_mech:
1095 	gss_mech_put(gss_auth->mech);
1096 err_put_net:
1097 	put_net(gss_auth->net);
1098 err_free:
1099 	kfree(gss_auth->target_name);
1100 	kfree(gss_auth);
1101 out_dec:
1102 	module_put(THIS_MODULE);
1103 	trace_rpcgss_createauth(flavor, err);
1104 	return ERR_PTR(err);
1105 }
1106 
1107 static void
1108 gss_free(struct gss_auth *gss_auth)
1109 {
1110 	gss_pipe_free(gss_auth->gss_pipe[0]);
1111 	gss_pipe_free(gss_auth->gss_pipe[1]);
1112 	gss_mech_put(gss_auth->mech);
1113 	put_net(gss_auth->net);
1114 	kfree(gss_auth->target_name);
1115 
1116 	kfree(gss_auth);
1117 	module_put(THIS_MODULE);
1118 }
1119 
1120 static void
1121 gss_free_callback(struct kref *kref)
1122 {
1123 	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);
1124 
1125 	gss_free(gss_auth);
1126 }
1127 
1128 static void
1129 gss_put_auth(struct gss_auth *gss_auth)
1130 {
1131 	kref_put(&gss_auth->kref, gss_free_callback);
1132 }
1133 
1134 static void
1135 gss_destroy(struct rpc_auth *auth)
1136 {
1137 	struct gss_auth *gss_auth = container_of(auth,
1138 			struct gss_auth, rpc_auth);
1139 
1140 	if (hash_hashed(&gss_auth->hash)) {
1141 		spin_lock(&gss_auth_hash_lock);
1142 		hash_del(&gss_auth->hash);
1143 		spin_unlock(&gss_auth_hash_lock);
1144 	}
1145 
1146 	gss_pipe_free(gss_auth->gss_pipe[0]);
1147 	gss_auth->gss_pipe[0] = NULL;
1148 	gss_pipe_free(gss_auth->gss_pipe[1]);
1149 	gss_auth->gss_pipe[1] = NULL;
1150 	rpcauth_destroy_credcache(auth);
1151 
1152 	gss_put_auth(gss_auth);
1153 }
1154 
1155 /*
1156  * Auths may be shared between rpc clients that were cloned from a
1157  * common client with the same xprt, if they also share the flavor and
1158  * target_name.
1159  *
1160  * The auth is looked up from the oldest parent sharing the same
1161  * cl_xprt, and the auth itself references only that common parent
1162  * (which is guaranteed to last as long as any of its descendants).
1163  */
1164 static struct gss_auth *
1165 gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args,
1166 		struct rpc_clnt *clnt,
1167 		struct gss_auth *new)
1168 {
1169 	struct gss_auth *gss_auth;
1170 	unsigned long hashval = (unsigned long)clnt;
1171 
1172 	spin_lock(&gss_auth_hash_lock);
1173 	hash_for_each_possible(gss_auth_hash_table,
1174 			gss_auth,
1175 			hash,
1176 			hashval) {
1177 		if (gss_auth->client != clnt)
1178 			continue;
1179 		if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
1180 			continue;
1181 		if (gss_auth->target_name != args->target_name) {
1182 			if (gss_auth->target_name == NULL)
1183 				continue;
1184 			if (args->target_name == NULL)
1185 				continue;
1186 			if (strcmp(gss_auth->target_name, args->target_name))
1187 				continue;
1188 		}
1189 		if (!refcount_inc_not_zero(&gss_auth->rpc_auth.au_count))
1190 			continue;
1191 		goto out;
1192 	}
1193 	if (new)
1194 		hash_add(gss_auth_hash_table, &new->hash, hashval);
1195 	gss_auth = new;
1196 out:
1197 	spin_unlock(&gss_auth_hash_lock);
1198 	return gss_auth;
1199 }
1200 
1201 static struct gss_auth *
1202 gss_create_hashed(const struct rpc_auth_create_args *args,
1203 		  struct rpc_clnt *clnt)
1204 {
1205 	struct gss_auth *gss_auth;
1206 	struct gss_auth *new;
1207 
1208 	gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
1209 	if (gss_auth != NULL)
1210 		goto out;
1211 	new = gss_create_new(args, clnt);
1212 	if (IS_ERR(new))
1213 		return new;
1214 	gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
1215 	if (gss_auth != new)
1216 		gss_destroy(&new->rpc_auth);
1217 out:
1218 	return gss_auth;
1219 }
1220 
1221 static struct rpc_auth *
1222 gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1223 {
1224 	struct gss_auth *gss_auth;
1225 	struct rpc_xprt_switch *xps = rcu_access_pointer(clnt->cl_xpi.xpi_xpswitch);
1226 
1227 	while (clnt != clnt->cl_parent) {
1228 		struct rpc_clnt *parent = clnt->cl_parent;
1229 		/* Find the original parent for this transport */
1230 		if (rcu_access_pointer(parent->cl_xpi.xpi_xpswitch) != xps)
1231 			break;
1232 		clnt = parent;
1233 	}
1234 
1235 	gss_auth = gss_create_hashed(args, clnt);
1236 	if (IS_ERR(gss_auth))
1237 		return ERR_CAST(gss_auth);
1238 	return &gss_auth->rpc_auth;
1239 }
1240 
1241 static struct gss_cred *
1242 gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
1243 {
1244 	struct gss_cred *new;
1245 
1246 	/* Make a copy of the cred so that we can reference count it */
1247 	new = kzalloc(sizeof(*gss_cred), GFP_NOFS);
1248 	if (new) {
1249 		struct auth_cred acred = {
1250 			.cred = gss_cred->gc_base.cr_cred,
1251 		};
1252 		struct gss_cl_ctx *ctx =
1253 			rcu_dereference_protected(gss_cred->gc_ctx, 1);
1254 
1255 		rpcauth_init_cred(&new->gc_base, &acred,
1256 				&gss_auth->rpc_auth,
1257 				&gss_nullops);
1258 		new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
1259 		new->gc_service = gss_cred->gc_service;
1260 		new->gc_principal = gss_cred->gc_principal;
1261 		kref_get(&gss_auth->kref);
1262 		rcu_assign_pointer(new->gc_ctx, ctx);
1263 		gss_get_ctx(ctx);
1264 	}
1265 	return new;
1266 }
1267 
1268 /*
1269  * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
1270  * to the server with the GSS control procedure field set to
1271  * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
1272  * all RPCSEC_GSS state associated with that context.
1273  */
1274 static void
1275 gss_send_destroy_context(struct rpc_cred *cred)
1276 {
1277 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1278 	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1279 	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1280 	struct gss_cred *new;
1281 	struct rpc_task *task;
1282 
1283 	new = gss_dup_cred(gss_auth, gss_cred);
1284 	if (new) {
1285 		ctx->gc_proc = RPC_GSS_PROC_DESTROY;
1286 
1287 		task = rpc_call_null(gss_auth->client, &new->gc_base,
1288 				RPC_TASK_ASYNC|RPC_TASK_SOFT);
1289 		if (!IS_ERR(task))
1290 			rpc_put_task(task);
1291 
1292 		put_rpccred(&new->gc_base);
1293 	}
1294 }
1295 
1296 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
1297  * to create a new cred or context, so they check that things have been
1298  * allocated before freeing them. */
1299 static void
1300 gss_do_free_ctx(struct gss_cl_ctx *ctx)
1301 {
1302 	gss_delete_sec_context(&ctx->gc_gss_ctx);
1303 	kfree(ctx->gc_wire_ctx.data);
1304 	kfree(ctx->gc_acceptor.data);
1305 	kfree(ctx);
1306 }
1307 
1308 static void
1309 gss_free_ctx_callback(struct rcu_head *head)
1310 {
1311 	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
1312 	gss_do_free_ctx(ctx);
1313 }
1314 
1315 static void
1316 gss_free_ctx(struct gss_cl_ctx *ctx)
1317 {
1318 	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
1319 }
1320 
1321 static void
1322 gss_free_cred(struct gss_cred *gss_cred)
1323 {
1324 	kfree(gss_cred);
1325 }
1326 
1327 static void
1328 gss_free_cred_callback(struct rcu_head *head)
1329 {
1330 	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
1331 	gss_free_cred(gss_cred);
1332 }
1333 
1334 static void
1335 gss_destroy_nullcred(struct rpc_cred *cred)
1336 {
1337 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1338 	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1339 	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1340 
1341 	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
1342 	put_cred(cred->cr_cred);
1343 	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
1344 	if (ctx)
1345 		gss_put_ctx(ctx);
1346 	gss_put_auth(gss_auth);
1347 }
1348 
1349 static void
1350 gss_destroy_cred(struct rpc_cred *cred)
1351 {
1352 
1353 	if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
1354 		gss_send_destroy_context(cred);
1355 	gss_destroy_nullcred(cred);
1356 }
1357 
1358 static int
1359 gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
1360 {
1361 	return hash_64(from_kuid(&init_user_ns, acred->cred->fsuid), hashbits);
1362 }
1363 
1364 /*
1365  * Lookup RPCSEC_GSS cred for the current process
1366  */
1367 static struct rpc_cred *
1368 gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
1369 {
1370 	return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
1371 }
1372 
1373 static struct rpc_cred *
1374 gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
1375 {
1376 	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1377 	struct gss_cred	*cred = NULL;
1378 	int err = -ENOMEM;
1379 
1380 	if (!(cred = kzalloc(sizeof(*cred), gfp)))
1381 		goto out_err;
1382 
1383 	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
1384 	/*
1385 	 * Note: in order to force a call to call_refresh(), we deliberately
1386 	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
1387 	 */
1388 	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
1389 	cred->gc_service = gss_auth->service;
1390 	cred->gc_principal = acred->principal;
1391 	kref_get(&gss_auth->kref);
1392 	return &cred->gc_base;
1393 
1394 out_err:
1395 	return ERR_PTR(err);
1396 }
1397 
1398 static int
1399 gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
1400 {
1401 	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1402 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1403 	int err;
1404 
1405 	do {
1406 		err = gss_create_upcall(gss_auth, gss_cred);
1407 	} while (err == -EAGAIN);
1408 	return err;
1409 }
1410 
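/*
 * Return a NUL-terminated copy of the context's acceptor name, or NULL if
 * there is no context or no acceptor.  The buffer is sized outside the RCU
 * read lock, so retry with a larger one if the context was replaced by one
 * with a longer name in the meantime.
 */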
1411 static char *
1412 gss_stringify_acceptor(struct rpc_cred *cred)
1413 {
1414 	char *string = NULL;
1415 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1416 	struct gss_cl_ctx *ctx;
1417 	unsigned int len;
1418 	struct xdr_netobj *acceptor;
1419 
1420 	rcu_read_lock();
1421 	ctx = rcu_dereference(gss_cred->gc_ctx);
1422 	if (!ctx)
1423 		goto out;
1424 
1425 	len = ctx->gc_acceptor.len;
1426 	rcu_read_unlock();
1427 
1428 	/* no point if there's no string */
1429 	if (!len)
1430 		return NULL;
1431 realloc:
1432 	string = kmalloc(len + 1, GFP_KERNEL);
1433 	if (!string)
1434 		return NULL;
1435 
1436 	rcu_read_lock();
1437 	ctx = rcu_dereference(gss_cred->gc_ctx);
1438 
1439 	/* did the ctx disappear or was it replaced by one with no acceptor? */
1440 	if (!ctx || !ctx->gc_acceptor.len) {
1441 		kfree(string);
1442 		string = NULL;
1443 		goto out;
1444 	}
1445 
1446 	acceptor = &ctx->gc_acceptor;
1447 
1448 	/*
1449 	 * Did we find a new acceptor that's longer than the original? Allocate
1450 	 * a longer buffer and try again.
1451 	 */
1452 	if (len < acceptor->len) {
1453 		len = acceptor->len;
1454 		rcu_read_unlock();
1455 		kfree(string);
1456 		goto realloc;
1457 	}
1458 
1459 	memcpy(string, acceptor->data, acceptor->len);
1460 	string[acceptor->len] = '\0';
1461 out:
1462 	rcu_read_unlock();
1463 	return string;
1464 }
1465 
1466 /*
1467  * Returns -EACCES if GSS context is NULL or will expire within the
1468  * timeout (in seconds)
1469  */
1470 static int
1471 gss_key_timeout(struct rpc_cred *rc)
1472 {
1473 	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1474 	struct gss_cl_ctx *ctx;
1475 	unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
1476 	int ret = 0;
1477 
1478 	rcu_read_lock();
1479 	ctx = rcu_dereference(gss_cred->gc_ctx);
1480 	if (!ctx || time_after(timeout, ctx->gc_expiry))
1481 		ret = -EACCES;
1482 	rcu_read_unlock();
1483 
1484 	return ret;
1485 }
1486 
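/*
 * Decide whether a cached cred may be used for @acred.  Creds that are no
 * longer being initialized (RPCAUTH_CRED_NEW clear) must have an unexpired,
 * up-to-date context; in all cases the principal name (or, failing that,
 * the fsuid) must match.
 */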
1487 static int
1488 gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
1489 {
1490 	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1491 	struct gss_cl_ctx *ctx;
1492 	int ret;
1493 
1494 	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
1495 		goto out;
1496 	/* Don't match with creds that have expired. */
1497 	rcu_read_lock();
1498 	ctx = rcu_dereference(gss_cred->gc_ctx);
1499 	if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
1500 		rcu_read_unlock();
1501 		return 0;
1502 	}
1503 	rcu_read_unlock();
1504 	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
1505 		return 0;
1506 out:
1507 	if (acred->principal != NULL) {
1508 		if (gss_cred->gc_principal == NULL)
1509 			return 0;
1510 		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
1511 	} else {
1512 		if (gss_cred->gc_principal != NULL)
1513 			return 0;
1514 		ret = uid_eq(rc->cr_cred->fsuid, acred->cred->fsuid);
1515 	}
1516 	return ret;
1517 }
1518 
1519 /*
1520  * Marshal credentials.
1521  *
1522  * The expensive part is computing the verifier. We can't cache a
1523  * pre-computed version of the verifier because the seqno, which
1524  * is different every time, is included in the MIC.
1525  */
1526 static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
1527 {
1528 	struct rpc_rqst *req = task->tk_rqstp;
1529 	struct rpc_cred *cred = req->rq_cred;
1530 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1531 						 gc_base);
1532 	struct gss_cl_ctx	*ctx = gss_cred_get_ctx(cred);
1533 	__be32		*p, *cred_len;
1534 	u32             maj_stat = 0;
1535 	struct xdr_netobj mic;
1536 	struct kvec	iov;
1537 	struct xdr_buf	verf_buf;
1538 	int status;
1539 
1540 	/* Credential */
1541 
1542 	p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
1543 			      ctx->gc_wire_ctx.len);
1544 	if (!p)
1545 		goto marshal_failed;
1546 	*p++ = rpc_auth_gss;
1547 	cred_len = p++;
1548 
1549 	spin_lock(&ctx->gc_seq_lock);
1550 	req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
1551 	spin_unlock(&ctx->gc_seq_lock);
1552 	if (req->rq_seqno == MAXSEQ)
1553 		goto expired;
1554 	trace_rpcgss_seqno(task);
1555 
1556 	*p++ = cpu_to_be32(RPC_GSS_VERSION);
1557 	*p++ = cpu_to_be32(ctx->gc_proc);
1558 	*p++ = cpu_to_be32(req->rq_seqno);
1559 	*p++ = cpu_to_be32(gss_cred->gc_service);
1560 	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
1561 	*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
1562 
1563 	/* Verifier */
1564 
1565 	/* We compute the checksum for the verifier over the xdr-encoded bytes
1566 	 * starting with the xid and ending at the end of the credential: */
1567 	iov.iov_base = req->rq_snd_buf.head[0].iov_base;
1568 	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
1569 	xdr_buf_from_iov(&iov, &verf_buf);
1570 
1571 	p = xdr_reserve_space(xdr, sizeof(*p));
1572 	if (!p)
1573 		goto marshal_failed;
1574 	*p++ = rpc_auth_gss;
1575 	mic.data = (u8 *)(p + 1);
1576 	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1577 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1578 		goto expired;
1579 	else if (maj_stat != 0)
1580 		goto bad_mic;
1581 	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1582 		goto marshal_failed;
1583 	status = 0;
1584 out:
1585 	gss_put_ctx(ctx);
1586 	return status;
1587 expired:
1588 	clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1589 	status = -EKEYEXPIRED;
1590 	goto out;
1591 marshal_failed:
1592 	status = -EMSGSIZE;
1593 	goto out;
1594 bad_mic:
1595 	trace_rpcgss_get_mic(task, maj_stat);
1596 	status = -EIO;
1597 	goto out;
1598 }
1599 
1600 static int gss_renew_cred(struct rpc_task *task)
1601 {
1602 	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
1603 	struct gss_cred *gss_cred = container_of(oldcred,
1604 						 struct gss_cred,
1605 						 gc_base);
1606 	struct rpc_auth *auth = oldcred->cr_auth;
1607 	struct auth_cred acred = {
1608 		.cred = oldcred->cr_cred,
1609 		.principal = gss_cred->gc_principal,
1610 	};
1611 	struct rpc_cred *new;
1612 
1613 	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
1614 	if (IS_ERR(new))
1615 		return PTR_ERR(new);
1616 	task->tk_rqstp->rq_cred = new;
1617 	put_rpccred(oldcred);
1618 	return 0;
1619 }
1620 
1621 static int gss_cred_is_negative_entry(struct rpc_cred *cred)
1622 {
1623 	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
1624 		unsigned long now = jiffies;
1625 		unsigned long begin, expire;
1626 		struct gss_cred *gss_cred;
1627 
1628 		gss_cred = container_of(cred, struct gss_cred, gc_base);
1629 		begin = gss_cred->gc_upcall_timestamp;
1630 		expire = begin + gss_expired_cred_retry_delay * HZ;
1631 
1632 		if (time_in_range_open(now, begin, expire))
1633 			return 1;
1634 	}
1635 	return 0;
1636 }
1637 
1638 /*
1639  * Refresh credentials. XXX - finish
1640  */
1641 static int
1642 gss_refresh(struct rpc_task *task)
1643 {
1644 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1645 	int ret = 0;
1646 
1647 	if (gss_cred_is_negative_entry(cred))
1648 		return -EKEYEXPIRED;
1649 
1650 	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
1651 			!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
1652 		ret = gss_renew_cred(task);
1653 		if (ret < 0)
1654 			goto out;
1655 		cred = task->tk_rqstp->rq_cred;
1656 	}
1657 
1658 	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
1659 		ret = gss_refresh_upcall(task);
1660 out:
1661 	return ret;
1662 }
1663 
1664 /* Dummy refresh routine: used only when destroying the context */
1665 static int
1666 gss_refresh_null(struct rpc_task *task)
1667 {
1668 	return 0;
1669 }
1670 
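/*
 * Check the verifier in a reply: the flavor must be RPCSEC_GSS and the
 * server's MIC must verify against the XDR-encoded sequence number that
 * was sent with this request.
 */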
1671 static int
1672 gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
1673 {
1674 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1675 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1676 	__be32		*p, *seq = NULL;
1677 	struct kvec	iov;
1678 	struct xdr_buf	verf_buf;
1679 	struct xdr_netobj mic;
1680 	u32		len, maj_stat;
1681 	int		status;
1682 
1683 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1684 	if (!p)
1685 		goto validate_failed;
1686 	if (*p++ != rpc_auth_gss)
1687 		goto validate_failed;
1688 	len = be32_to_cpup(p);
1689 	if (len > RPC_MAX_AUTH_SIZE)
1690 		goto validate_failed;
1691 	p = xdr_inline_decode(xdr, len);
1692 	if (!p)
1693 		goto validate_failed;
1694 
1695 	seq = kmalloc(4, GFP_NOFS);
1696 	if (!seq)
1697 		goto validate_failed;
1698 	*seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
1699 	iov.iov_base = seq;
1700 	iov.iov_len = 4;
1701 	xdr_buf_from_iov(&iov, &verf_buf);
1702 	mic.data = (u8 *)p;
1703 	mic.len = len;
1704 	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1705 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1706 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1707 	if (maj_stat)
1708 		goto bad_mic;
1709 
1710 	/* We leave it to unwrap to calculate au_rslack. For now we just
1711 	 * calculate the length of the verifier: */
1712 	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
1713 	status = 0;
1714 out:
1715 	gss_put_ctx(ctx);
1716 	kfree(seq);
1717 	return status;
1718 
1719 validate_failed:
1720 	status = -EIO;
1721 	goto out;
1722 bad_mic:
1723 	trace_rpcgss_verify_mic(task, maj_stat);
1724 	status = -EACCES;
1725 	goto out;
1726 }
1727 
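/*
 * Wrap the request for rpc_gss_svc_integrity (RFC 2203, section 5.3.2.2):
 * encode the sequence number and call arguments as databody_integ, then
 * append a checksum (MIC) computed over those bytes.
 */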
1728 static noinline_for_stack int
1729 gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1730 		   struct rpc_task *task, struct xdr_stream *xdr)
1731 {
1732 	struct rpc_rqst *rqstp = task->tk_rqstp;
1733 	struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
1734 	struct xdr_netobj mic;
1735 	__be32 *p, *integ_len;
1736 	u32 offset, maj_stat;
1737 
1738 	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1739 	if (!p)
1740 		goto wrap_failed;
1741 	integ_len = p++;
1742 	*p = cpu_to_be32(rqstp->rq_seqno);
1743 
1744 	if (rpcauth_wrap_req_encode(task, xdr))
1745 		goto wrap_failed;
1746 
1747 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1748 	if (xdr_buf_subsegment(snd_buf, &integ_buf,
1749 				offset, snd_buf->len - offset))
1750 		goto wrap_failed;
1751 	*integ_len = cpu_to_be32(integ_buf.len);
1752 
1753 	p = xdr_reserve_space(xdr, 0);
1754 	if (!p)
1755 		goto wrap_failed;
1756 	mic.data = (u8 *)(p + 1);
1757 	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1758 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1759 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1760 	else if (maj_stat)
1761 		goto bad_mic;
1762 	/* Check that the trailing MIC fit in the buffer, after the fact */
1763 	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1764 		goto wrap_failed;
1765 	return 0;
1766 wrap_failed:
1767 	return -EMSGSIZE;
1768 bad_mic:
1769 	trace_rpcgss_get_mic(task, maj_stat);
1770 	return -EIO;
1771 }
1772 
1773 static void
1774 priv_release_snd_buf(struct rpc_rqst *rqstp)
1775 {
1776 	int i;
1777 
1778 	for (i = 0; i < rqstp->rq_enc_pages_num; i++)
1779 		__free_page(rqstp->rq_enc_pages[i]);
1780 	kfree(rqstp->rq_enc_pages);
1781 	rqstp->rq_release_snd_buf = NULL;
1782 }
1783 
1784 static int
1785 alloc_enc_pages(struct rpc_rqst *rqstp)
1786 {
1787 	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1788 	int first, last, i;
1789 
1790 	if (rqstp->rq_release_snd_buf)
1791 		rqstp->rq_release_snd_buf(rqstp);
1792 
1793 	if (snd_buf->page_len == 0) {
1794 		rqstp->rq_enc_pages_num = 0;
1795 		return 0;
1796 	}
1797 
1798 	first = snd_buf->page_base >> PAGE_SHIFT;
1799 	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
1800 	rqstp->rq_enc_pages_num = last - first + 1 + 1;
1801 	rqstp->rq_enc_pages
1802 		= kmalloc_array(rqstp->rq_enc_pages_num,
1803 				sizeof(struct page *),
1804 				GFP_NOFS);
1805 	if (!rqstp->rq_enc_pages)
1806 		goto out;
1807 	for (i = 0; i < rqstp->rq_enc_pages_num; i++) {
1808 		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
1809 		if (rqstp->rq_enc_pages[i] == NULL)
1810 			goto out_free;
1811 	}
1812 	rqstp->rq_release_snd_buf = priv_release_snd_buf;
1813 	return 0;
1814 out_free:
1815 	rqstp->rq_enc_pages_num = i;
1816 	priv_release_snd_buf(rqstp);
1817 out:
1818 	return -EAGAIN;
1819 }
1820 
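/*
 * Wrap the request for rpc_gss_svc_privacy: the sequence number and call
 * arguments are encrypted in place by gss_wrap(), using the pre-allocated
 * rq_enc_pages for the expanded output, and sent as one opaque body.
 */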
1821 static noinline_for_stack int
1822 gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1823 		  struct rpc_task *task, struct xdr_stream *xdr)
1824 {
1825 	struct rpc_rqst *rqstp = task->tk_rqstp;
1826 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
1827 	u32		pad, offset, maj_stat;
1828 	int		status;
1829 	__be32		*p, *opaque_len;
1830 	struct page	**inpages;
1831 	int		first;
1832 	struct kvec	*iov;
1833 
1834 	status = -EIO;
1835 	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1836 	if (!p)
1837 		goto wrap_failed;
1838 	opaque_len = p++;
1839 	*p = cpu_to_be32(rqstp->rq_seqno);
1840 
1841 	if (rpcauth_wrap_req_encode(task, xdr))
1842 		goto wrap_failed;
1843 
1844 	status = alloc_enc_pages(rqstp);
1845 	if (unlikely(status))
1846 		goto wrap_failed;
1847 	first = snd_buf->page_base >> PAGE_SHIFT;
1848 	inpages = snd_buf->pages + first;
1849 	snd_buf->pages = rqstp->rq_enc_pages;
1850 	snd_buf->page_base -= first << PAGE_SHIFT;
1851 	/*
1852 	 * Move the tail into its own page, in case gss_wrap needs
1853 	 * more space in the head when wrapping.
1854 	 *
1855 	 * Still... Why can't gss_wrap just slide the tail down?
1856 	 */
1857 	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
1858 		char *tmp;
1859 
1860 		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
1861 		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
1862 		snd_buf->tail[0].iov_base = tmp;
1863 	}
1864 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1865 	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
1866 	/* slack space should prevent this ever happening: */
1867 	if (unlikely(snd_buf->len > snd_buf->buflen))
1868 		goto wrap_failed;
1869 	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
1870 	 * done anyway, so it's safe to put the request on the wire: */
1871 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1872 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1873 	else if (maj_stat)
1874 		goto bad_wrap;
1875 
1876 	*opaque_len = cpu_to_be32(snd_buf->len - offset);
1877 	/* guess whether the pad goes into the head or the tail: */
1878 	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
1879 		iov = snd_buf->tail;
1880 	else
1881 		iov = snd_buf->head;
1882 	p = iov->iov_base + iov->iov_len;
1883 	pad = xdr_pad_size(snd_buf->len - offset);
1884 	memset(p, 0, pad);
1885 	iov->iov_len += pad;
1886 	snd_buf->len += pad;
1887 
1888 	return 0;
1889 wrap_failed:
1890 	return status;
1891 bad_wrap:
1892 	trace_rpcgss_wrap(task, maj_stat);
1893 	return -EIO;
1894 }
1895 
1896 static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
1897 {
1898 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1899 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1900 			gc_base);
1901 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1902 	int status;
1903 
1904 	status = -EIO;
1905 	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
1906 		/* The spec seems a little ambiguous here, but I think that not
1907 		 * wrapping context destruction requests makes the most sense.
1908 		 */
1909 		status = rpcauth_wrap_req_encode(task, xdr);
1910 		goto out;
1911 	}
1912 	switch (gss_cred->gc_service) {
1913 	case RPC_GSS_SVC_NONE:
1914 		status = rpcauth_wrap_req_encode(task, xdr);
1915 		break;
1916 	case RPC_GSS_SVC_INTEGRITY:
1917 		status = gss_wrap_req_integ(cred, ctx, task, xdr);
1918 		break;
1919 	case RPC_GSS_SVC_PRIVACY:
1920 		status = gss_wrap_req_priv(cred, ctx, task, xdr);
1921 		break;
1922 	default:
1923 		status = -EIO;
1924 	}
1925 out:
1926 	gss_put_ctx(ctx);
1927 	return status;
1928 }
1929 
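/*
 * For RPC_GSS_SVC_NONE there is nothing to unwrap; just record the
 * verifier size so the reply buffer can be sized and aligned correctly.
 */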
1930 static int
1931 gss_unwrap_resp_auth(struct rpc_cred *cred)
1932 {
1933 	struct rpc_auth *auth = cred->cr_auth;
1934 
1935 	auth->au_rslack = auth->au_verfsize;
1936 	auth->au_ralign = auth->au_verfsize;
1937 	return 0;
1938 }
1939 
1940 /*
1941  * RFC 2203, Section 5.3.2.2
1942  *
1943  *	struct rpc_gss_integ_data {
1944  *		opaque databody_integ<>;
1945  *		opaque checksum<>;
1946  *	};
1947  *
1948  *	struct rpc_gss_data_t {
1949  *		unsigned int seq_num;
1950  *		proc_req_arg_t arg;
1951  *	};
1952  */
1953 static noinline_for_stack int
1954 gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
1955 		      struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
1956 		      struct xdr_stream *xdr)
1957 {
1958 	struct xdr_buf gss_data, *rcv_buf = &rqstp->rq_rcv_buf;
1959 	struct rpc_auth *auth = cred->cr_auth;
1960 	u32 len, offset, seqno, maj_stat;
1961 	struct xdr_netobj mic;
1962 	int ret;
1963 
1964 	ret = -EIO;
1965 	mic.data = NULL;
1966 
1967 	/* opaque databody_integ<>; */
1968 	if (xdr_stream_decode_u32(xdr, &len))
1969 		goto unwrap_failed;
1970 	if (len & 3)
1971 		goto unwrap_failed;
1972 	offset = rcv_buf->len - xdr_stream_remaining(xdr);
1973 	if (xdr_stream_decode_u32(xdr, &seqno))
1974 		goto unwrap_failed;
1975 	if (seqno != rqstp->rq_seqno)
1976 		goto bad_seqno;
1977 	if (xdr_buf_subsegment(rcv_buf, &gss_data, offset, len))
1978 		goto unwrap_failed;
1979 
1980 	/*
1981 	 * The xdr_stream now points to the beginning of the
1982 	 * upper layer payload, to be passed below to
1983 	 * rpcauth_unwrap_resp_decode(). The checksum, which
1984 	 * follows the upper layer payload in @rcv_buf, is
1985 	 * located and parsed without updating the xdr_stream.
1986 	 */
1987 
1988 	/* opaque checksum<>; */
1989 	offset += len;
1990 	if (xdr_decode_word(rcv_buf, offset, &len))
1991 		goto unwrap_failed;
1992 	offset += sizeof(__be32);
1993 	if (offset + len > rcv_buf->len)
1994 		goto unwrap_failed;
1995 	mic.len = len;
1996 	mic.data = kmalloc(len, GFP_NOFS);
1997 	if (!mic.data)
1998 		goto unwrap_failed;
1999 	if (read_bytes_from_xdr_buf(rcv_buf, offset, mic.data, mic.len))
2000 		goto unwrap_failed;
2001 
2002 	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &gss_data, &mic);
2003 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
2004 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
2005 	if (maj_stat != GSS_S_COMPLETE)
2006 		goto bad_mic;
2007 
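	/* Reply overhead: the verifier, the databody_integ length and
	 * seq_num words, the checksum length word, and the checksum
	 * itself; the upper-layer payload begins two words after the
	 * verifier.
	 */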
2008 	auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
2009 	auth->au_ralign = auth->au_verfsize + 2;
2010 	ret = 0;
2011 
2012 out:
2013 	kfree(mic.data);
2014 	return ret;
2015 
2016 unwrap_failed:
2017 	trace_rpcgss_unwrap_failed(task);
2018 	goto out;
2019 bad_seqno:
2020 	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, seqno);
2021 	goto out;
2022 bad_mic:
2023 	trace_rpcgss_verify_mic(task, maj_stat);
2024 	goto out;
2025 }
2026 
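/*
 * RFC 2203, Section 5.3.2.3
 *
 *	struct rpc_gss_priv_data {
 *		opaque databody_priv<>;
 *	};
 *
 * databody_priv is the result of gss_wrap() applied to an
 * rpc_gss_data_t (seq_num followed by the reply data).
 */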
2027 static noinline_for_stack int
2028 gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
2029 		     struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
2030 		     struct xdr_stream *xdr)
2031 {
2032 	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
2033 	struct kvec *head = rqstp->rq_rcv_buf.head;
2034 	struct rpc_auth *auth = cred->cr_auth;
2035 	unsigned int savedlen = rcv_buf->len;
2036 	u32 offset, opaque_len, maj_stat;
2037 	__be32 *p;
2038 
2039 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
2040 	if (unlikely(!p))
2041 		goto unwrap_failed;
2042 	opaque_len = be32_to_cpup(p++);
2043 	offset = (u8 *)(p) - (u8 *)head->iov_base;
2044 	if (offset + opaque_len > rcv_buf->len)
2045 		goto unwrap_failed;
2046 	rcv_buf->len = offset + opaque_len;
2047 
2048 	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
2049 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
2050 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
2051 	if (maj_stat != GSS_S_COMPLETE)
2052 		goto bad_unwrap;
2053 	/* gss_unwrap decrypted the sequence number */
2054 	if (be32_to_cpup(p++) != rqstp->rq_seqno)
2055 		goto bad_seqno;
2056 
2057 	/* gss_unwrap redacts the opaque blob from the head iovec.
2058 	 * rcv_buf has changed, thus the stream needs to be reset.
2059 	 */
2060 	xdr_init_decode(xdr, rcv_buf, p, rqstp);
2061 
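	/* Report reply slack and alignment: the verifier, the opaque
	 * length and seq_num words, plus however much gss_unwrap()
	 * stripped from the buffer.
	 */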
2062 	auth->au_rslack = auth->au_verfsize + 2 +
2063 			  XDR_QUADLEN(savedlen - rcv_buf->len);
2064 	auth->au_ralign = auth->au_verfsize + 2 +
2065 			  XDR_QUADLEN(savedlen - rcv_buf->len);
2066 	return 0;
2067 unwrap_failed:
2068 	trace_rpcgss_unwrap_failed(task);
2069 	return -EIO;
2070 bad_seqno:
2071 	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
2072 	return -EIO;
2073 bad_unwrap:
2074 	trace_rpcgss_unwrap(task, maj_stat);
2075 	return -EIO;
2076 }
2077 
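/*
 * Serial-number comparison: returns true if @new is more recent than
 * @old, even across 32-bit sequence number wrap-around.
 */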
2078 static bool
2079 gss_seq_is_newer(u32 new, u32 old)
2080 {
2081 	return (s32)(new - old) > 0;
2082 }
2083 
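/*
 * Decide whether a request awaiting (re)transmission must first be
 * re-encoded with a fresh GSS sequence number: either its sequence
 * number has run ahead of the context's counter, or it has fallen out
 * of the GSS sequence window relative to the newest number already
 * transmitted (gc_seq_xmit).
 */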
2084 static bool
2085 gss_xmit_need_reencode(struct rpc_task *task)
2086 {
2087 	struct rpc_rqst *req = task->tk_rqstp;
2088 	struct rpc_cred *cred = req->rq_cred;
2089 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2090 	u32 win, seq_xmit = 0;
2091 	bool ret = true;
2092 
2093 	if (!ctx)
2094 		goto out;
2095 
2096 	if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
2097 		goto out_ctx;
2098 
2099 	seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
2100 	while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
2101 		u32 tmp = seq_xmit;
2102 
2103 		seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
2104 		if (seq_xmit == tmp) {
2105 			ret = false;
2106 			goto out_ctx;
2107 		}
2108 	}
2109 
2110 	win = ctx->gc_win;
2111 	if (win > 0)
2112 		ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
2113 
2114 out_ctx:
2115 	gss_put_ctx(ctx);
2116 out:
2117 	trace_rpcgss_need_reencode(task, seq_xmit, ret);
2118 	return ret;
2119 }
2120 
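/*
 * Per-message unwrap entry point: undo whatever protection
 * gss_wrap_req() applied, then decode the reply body.
 */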
2121 static int
2122 gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
2123 {
2124 	struct rpc_rqst *rqstp = task->tk_rqstp;
2125 	struct rpc_cred *cred = rqstp->rq_cred;
2126 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
2127 			gc_base);
2128 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2129 	int status = -EIO;
2130 
2131 	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
2132 		goto out_decode;
2133 	switch (gss_cred->gc_service) {
2134 	case RPC_GSS_SVC_NONE:
2135 		status = gss_unwrap_resp_auth(cred);
2136 		break;
2137 	case RPC_GSS_SVC_INTEGRITY:
2138 		status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
2139 		break;
2140 	case RPC_GSS_SVC_PRIVACY:
2141 		status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
2142 		break;
2143 	}
2144 	if (status)
2145 		goto out;
2146 
2147 out_decode:
2148 	status = rpcauth_unwrap_resp_decode(task, xdr);
2149 out:
2150 	gss_put_ctx(ctx);
2151 	return status;
2152 }
2153 
2154 static const struct rpc_authops authgss_ops = {
2155 	.owner		= THIS_MODULE,
2156 	.au_flavor	= RPC_AUTH_GSS,
2157 	.au_name	= "RPCSEC_GSS",
2158 	.create		= gss_create,
2159 	.destroy	= gss_destroy,
2160 	.hash_cred	= gss_hash_cred,
2161 	.lookup_cred	= gss_lookup_cred,
2162 	.crcreate	= gss_create_cred,
2163 	.info2flavor	= gss_mech_info2flavor,
2164 	.flavor2info	= gss_mech_flavor2info,
2165 };
2166 
2167 static const struct rpc_credops gss_credops = {
2168 	.cr_name		= "AUTH_GSS",
2169 	.crdestroy		= gss_destroy_cred,
2170 	.cr_init		= gss_cred_init,
2171 	.crmatch		= gss_match,
2172 	.crmarshal		= gss_marshal,
2173 	.crrefresh		= gss_refresh,
2174 	.crvalidate		= gss_validate,
2175 	.crwrap_req		= gss_wrap_req,
2176 	.crunwrap_resp		= gss_unwrap_resp,
2177 	.crkey_timeout		= gss_key_timeout,
2178 	.crstringify_acceptor	= gss_stringify_acceptor,
2179 	.crneed_reencode	= gss_xmit_need_reencode,
2180 };
2181 
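/*
 * Reduced credential operations used by the short-lived credential that
 * carries a context-destroy (RPC_GSS_PROC_DESTROY) request: refresh is
 * a no-op and there is no key timeout or re-encode handling.
 */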
2182 static const struct rpc_credops gss_nullops = {
2183 	.cr_name		= "AUTH_GSS",
2184 	.crdestroy		= gss_destroy_nullcred,
2185 	.crmatch		= gss_match,
2186 	.crmarshal		= gss_marshal,
2187 	.crrefresh		= gss_refresh_null,
2188 	.crvalidate		= gss_validate,
2189 	.crwrap_req		= gss_wrap_req,
2190 	.crunwrap_resp		= gss_unwrap_resp,
2191 	.crstringify_acceptor	= gss_stringify_acceptor,
2192 };
2193 
2194 static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
2195 	.upcall		= gss_v0_upcall,
2196 	.downcall	= gss_pipe_downcall,
2197 	.destroy_msg	= gss_pipe_destroy_msg,
2198 	.open_pipe	= gss_pipe_open_v0,
2199 	.release_pipe	= gss_pipe_release,
2200 };
2201 
2202 static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
2203 	.upcall		= gss_v1_upcall,
2204 	.downcall	= gss_pipe_downcall,
2205 	.destroy_msg	= gss_pipe_destroy_msg,
2206 	.open_pipe	= gss_pipe_open_v1,
2207 	.release_pipe	= gss_pipe_release,
2208 };
2209 
2210 static __net_init int rpcsec_gss_init_net(struct net *net)
2211 {
2212 	return gss_svc_init_net(net);
2213 }
2214 
2215 static __net_exit void rpcsec_gss_exit_net(struct net *net)
2216 {
2217 	gss_svc_shutdown_net(net);
2218 }
2219 
2220 static struct pernet_operations rpcsec_gss_net_ops = {
2221 	.init = rpcsec_gss_init_net,
2222 	.exit = rpcsec_gss_exit_net,
2223 };
2224 
2225 /*
2226  * Initialize RPCSEC_GSS module
2227  */
2228 static int __init init_rpcsec_gss(void)
2229 {
2230 	int err = 0;
2231 
2232 	err = rpcauth_register(&authgss_ops);
2233 	if (err)
2234 		goto out;
2235 	err = gss_svc_init();
2236 	if (err)
2237 		goto out_unregister;
2238 	err = register_pernet_subsys(&rpcsec_gss_net_ops);
2239 	if (err)
2240 		goto out_svc_exit;
2241 	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
2242 	return 0;
2243 out_svc_exit:
2244 	gss_svc_shutdown();
2245 out_unregister:
2246 	rpcauth_unregister(&authgss_ops);
2247 out:
2248 	return err;
2249 }
2250 
2251 static void __exit exit_rpcsec_gss(void)
2252 {
2253 	unregister_pernet_subsys(&rpcsec_gss_net_ops);
2254 	gss_svc_shutdown();
2255 	rpcauth_unregister(&authgss_ops);
2256 	rcu_barrier(); /* Wait for outstanding call_rcu() callbacks to complete */
2257 }
2258 
2259 MODULE_ALIAS("rpc-auth-6");
2260 MODULE_LICENSE("GPL");
2261 module_param_named(expired_cred_retry_delay,
2262 		   gss_expired_cred_retry_delay,
2263 		   uint, 0644);
2264 MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
2265 		"the RPC engine retries an expired credential");
2266 
2267 module_param_named(key_expire_timeo,
2268 		   gss_key_expire_timeo,
2269 		   uint, 0644);
2270 MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
2271 		"credential key's lifetime during which the NFS layer "
2272 		"cleans up prior to key expiration");
2273 
2274 module_init(init_rpcsec_gss)
2275 module_exit(exit_rpcsec_gss)
2276