1 // SPDX-License-Identifier: BSD-3-Clause
2 /*
3  * linux/net/sunrpc/auth_gss/auth_gss.c
4  *
5  * RPCSEC_GSS client authentication.
6  *
7  *  Copyright (c) 2000 The Regents of the University of Michigan.
8  *  All rights reserved.
9  *
10  *  Dug Song       <dugsong@monkey.org>
11  *  Andy Adamson   <andros@umich.edu>
12  */
13 
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/types.h>
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/sunrpc/clnt.h>
21 #include <linux/sunrpc/auth.h>
22 #include <linux/sunrpc/auth_gss.h>
23 #include <linux/sunrpc/svcauth_gss.h>
24 #include <linux/sunrpc/gss_err.h>
25 #include <linux/workqueue.h>
26 #include <linux/sunrpc/rpc_pipe_fs.h>
27 #include <linux/sunrpc/gss_api.h>
28 #include <linux/uaccess.h>
29 #include <linux/hashtable.h>
30 
31 #include "../netns.h"
32 
33 #include <trace/events/rpcgss.h>
34 
35 static const struct rpc_authops authgss_ops;
36 
37 static const struct rpc_credops gss_credops;
38 static const struct rpc_credops gss_nullops;
39 
40 #define GSS_RETRY_EXPIRED 5
41 static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;
42 
43 #define GSS_KEY_EXPIRE_TIMEO 240
44 static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;
45 
46 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
47 # define RPCDBG_FACILITY	RPCDBG_AUTH
48 #endif
49 
50 #define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
51 /* length of a krb5 verifier (48), plus data added before arguments when
52  * using integrity (two 4-byte integers): */
53 #define GSS_VERF_SLACK		100
54 
55 static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
56 static DEFINE_SPINLOCK(gss_auth_hash_lock);
57 
58 struct gss_pipe {
59 	struct rpc_pipe_dir_object pdo;
60 	struct rpc_pipe *pipe;
61 	struct rpc_clnt *clnt;
62 	const char *name;
63 	struct kref kref;
64 };
65 
66 struct gss_auth {
67 	struct kref kref;
68 	struct hlist_node hash;
69 	struct rpc_auth rpc_auth;
70 	struct gss_api_mech *mech;
71 	enum rpc_gss_svc service;
72 	struct rpc_clnt *client;
73 	struct net *net;
74 	/*
75 	 * There are two upcall pipes; dentry[1], named "gssd", is used
76 	 * for the new text-based upcall; dentry[0] is named after the
77 	 * mechanism (for example, "krb5") and exists for
78  * backwards compatibility with older versions of gssd.
79 	 */
80 	struct gss_pipe *gss_pipe[2];
81 	const char *target_name;
82 };
83 
84 /* pipe_version >= 0 if and only if someone has a pipe open. */
85 static DEFINE_SPINLOCK(pipe_version_lock);
86 static struct rpc_wait_queue pipe_version_rpc_waitqueue;
87 static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
88 static void gss_put_auth(struct gss_auth *gss_auth);
89 
90 static void gss_free_ctx(struct gss_cl_ctx *);
91 static const struct rpc_pipe_ops gss_upcall_ops_v0;
92 static const struct rpc_pipe_ops gss_upcall_ops_v1;
93 
94 static inline struct gss_cl_ctx *
95 gss_get_ctx(struct gss_cl_ctx *ctx)
96 {
97 	refcount_inc(&ctx->count);
98 	return ctx;
99 }
100 
101 static inline void
102 gss_put_ctx(struct gss_cl_ctx *ctx)
103 {
104 	if (refcount_dec_and_test(&ctx->count))
105 		gss_free_ctx(ctx);
106 }
107 
108 /* gss_cred_set_ctx:
109  * called by gss_upcall_callback and gss_create_upcall in order
110  * to set the gss context. The actual exchange of an old context
111  * and a new one is protected by the pipe->lock.
112  */
113 static void
114 gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
115 {
116 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
117 
118 	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
119 		return;
120 	gss_get_ctx(ctx);
121 	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
122 	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
123 	smp_mb__before_atomic();
124 	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
125 }
126 
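/*
 * simple_get_bytes(): copy @len bytes from the downcall buffer at @p into
 * @res.  Returns a pointer just past the copied bytes, or ERR_PTR(-EFAULT)
 * if the read would run past @end (or wrap around).
 */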
127 static const void *
128 simple_get_bytes(const void *p, const void *end, void *res, size_t len)
129 {
130 	const void *q = (const void *)((const char *)p + len);
131 	if (unlikely(q > end || q < p))
132 		return ERR_PTR(-EFAULT);
133 	memcpy(res, p, len);
134 	return q;
135 }
136 
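/*
 * simple_get_netobj(): read a length-prefixed blob from the downcall buffer
 * into a freshly allocated xdr_netobj.  Returns a pointer just past the
 * blob, or an ERR_PTR on a short buffer or allocation failure.
 */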
137 static inline const void *
138 simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
139 {
140 	const void *q;
141 	unsigned int len;
142 
143 	p = simple_get_bytes(p, end, &len, sizeof(len));
144 	if (IS_ERR(p))
145 		return p;
146 	q = (const void *)((const char *)p + len);
147 	if (unlikely(q > end || q < p))
148 		return ERR_PTR(-EFAULT);
149 	dest->data = kmemdup(p, len, GFP_NOFS);
150 	if (unlikely(dest->data == NULL))
151 		return ERR_PTR(-ENOMEM);
152 	dest->len = len;
153 	return q;
154 }
155 
156 static struct gss_cl_ctx *
157 gss_cred_get_ctx(struct rpc_cred *cred)
158 {
159 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
160 	struct gss_cl_ctx *ctx = NULL;
161 
162 	rcu_read_lock();
163 	ctx = rcu_dereference(gss_cred->gc_ctx);
164 	if (ctx)
165 		gss_get_ctx(ctx);
166 	rcu_read_unlock();
167 	return ctx;
168 }
169 
170 static struct gss_cl_ctx *
171 gss_alloc_context(void)
172 {
173 	struct gss_cl_ctx *ctx;
174 
175 	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
176 	if (ctx != NULL) {
177 		ctx->gc_proc = RPC_GSS_PROC_DATA;
178 		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
179 		spin_lock_init(&ctx->gc_seq_lock);
180 		refcount_set(&ctx->count, 1);
181 	}
182 	return ctx;
183 }
184 
185 #define GSSD_MIN_TIMEOUT (60 * 60)
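/*
 * gss_fill_context(): parse the body of gssd's downcall into @ctx.  The
 * buffer is expected to contain, in order:
 *
 *	lifetime (seconds) | sequence window | wire context (netobj) |
 *	serialized security context | optional acceptor name (netobj)
 *
 * A window size of zero signals an error code from gssd instead.
 */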
186 static const void *
187 gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
188 {
189 	const void *q;
190 	unsigned int seclen;
191 	unsigned int timeout;
192 	unsigned long now = jiffies;
193 	u32 window_size;
194 	int ret;
195 
196 	/* First unsigned int gives the remaining lifetime in seconds of the
197 	 * credential - e.g. the remaining TGT lifetime for Kerberos or
198 	 * the -t value passed to GSSD.
199 	 */
200 	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
201 	if (IS_ERR(p))
202 		goto err;
203 	if (timeout == 0)
204 		timeout = GSSD_MIN_TIMEOUT;
205 	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
206 	/* Sequence number window. Determines the maximum number of
207 	 * simultaneous requests
208 	 */
209 	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
210 	if (IS_ERR(p))
211 		goto err;
212 	ctx->gc_win = window_size;
213 	/* gssd signals an error by passing ctx->gc_win = 0: */
214 	if (ctx->gc_win == 0) {
215 		/*
216 		 * in which case, p points to an error code. Anything other
217 		 * than -EKEYEXPIRED gets converted to -EACCES.
218 		 */
219 		p = simple_get_bytes(p, end, &ret, sizeof(ret));
220 		if (!IS_ERR(p))
221 			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
222 						    ERR_PTR(-EACCES);
223 		goto err;
224 	}
225 	/* copy the opaque wire context */
226 	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
227 	if (IS_ERR(p))
228 		goto err;
229 	/* import the opaque security context */
230 	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
231 	if (IS_ERR(p))
232 		goto err;
233 	q = (const void *)((const char *)p + seclen);
234 	if (unlikely(q > end || q < p)) {
235 		p = ERR_PTR(-EFAULT);
236 		goto err;
237 	}
238 	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
239 	if (ret < 0) {
240 		trace_rpcgss_import_ctx(ret);
241 		p = ERR_PTR(ret);
242 		goto err;
243 	}
244 
245 	/* is there any trailing data? */
246 	if (q == end) {
247 		p = q;
248 		goto done;
249 	}
250 
251 	/* pull in acceptor name (if there is one) */
252 	p = simple_get_netobj(q, end, &ctx->gc_acceptor);
253 	if (IS_ERR(p))
254 		goto err;
255 done:
256 	trace_rpcgss_context(ctx->gc_expiry, now, timeout,
257 			     ctx->gc_acceptor.len, ctx->gc_acceptor.data);
258 err:
259 	return p;
260 }
261 
262 /* XXX: Need some documentation about why UPCALL_BUF_LEN is so small.
263  *	Is user space expecting no more than UPCALL_BUF_LEN bytes?
264  *	Note that there are now _two_ NI_MAXHOST sized data items
265  *	being passed in this string.
266  */
267 #define UPCALL_BUF_LEN	256
268 
269 struct gss_upcall_msg {
270 	refcount_t count;
271 	kuid_t	uid;
272 	const char *service_name;
273 	struct rpc_pipe_msg msg;
274 	struct list_head list;
275 	struct gss_auth *auth;
276 	struct rpc_pipe *pipe;
277 	struct rpc_wait_queue rpc_waitqueue;
278 	wait_queue_head_t waitqueue;
279 	struct gss_cl_ctx *ctx;
280 	char databuf[UPCALL_BUF_LEN];
281 };
282 
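/*
 * get_pipe_version(): take a reference on the upcall pipe currently in use
 * and return its version (0 = legacy mechanism-named pipe, 1 = text-based
 * "gssd" pipe), or -EAGAIN if no gssd has opened a pipe in this net
 * namespace yet.
 */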
283 static int get_pipe_version(struct net *net)
284 {
285 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
286 	int ret;
287 
288 	spin_lock(&pipe_version_lock);
289 	if (sn->pipe_version >= 0) {
290 		atomic_inc(&sn->pipe_users);
291 		ret = sn->pipe_version;
292 	} else
293 		ret = -EAGAIN;
294 	spin_unlock(&pipe_version_lock);
295 	return ret;
296 }
297 
298 static void put_pipe_version(struct net *net)
299 {
300 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
301 
302 	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
303 		sn->pipe_version = -1;
304 		spin_unlock(&pipe_version_lock);
305 	}
306 }
307 
308 static void
309 gss_release_msg(struct gss_upcall_msg *gss_msg)
310 {
311 	struct net *net = gss_msg->auth->net;
312 	if (!refcount_dec_and_test(&gss_msg->count))
313 		return;
314 	put_pipe_version(net);
315 	BUG_ON(!list_empty(&gss_msg->list));
316 	if (gss_msg->ctx != NULL)
317 		gss_put_ctx(gss_msg->ctx);
318 	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
319 	gss_put_auth(gss_msg->auth);
320 	kfree_const(gss_msg->service_name);
321 	kfree(gss_msg);
322 }
323 
324 static struct gss_upcall_msg *
325 __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth)
326 {
327 	struct gss_upcall_msg *pos;
328 	list_for_each_entry(pos, &pipe->in_downcall, list) {
329 		if (!uid_eq(pos->uid, uid))
330 			continue;
331 		if (auth && pos->auth->service != auth->service)
332 			continue;
333 		refcount_inc(&pos->count);
334 		return pos;
335 	}
336 	return NULL;
337 }
338 
339 /* Try to add an upcall to the pipefs queue.
340  * If an upcall owned by our uid already exists, then we return a reference
341  * to that upcall instead of adding the new upcall.
342  */
343 static inline struct gss_upcall_msg *
344 gss_add_msg(struct gss_upcall_msg *gss_msg)
345 {
346 	struct rpc_pipe *pipe = gss_msg->pipe;
347 	struct gss_upcall_msg *old;
348 
349 	spin_lock(&pipe->lock);
350 	old = __gss_find_upcall(pipe, gss_msg->uid, gss_msg->auth);
351 	if (old == NULL) {
352 		refcount_inc(&gss_msg->count);
353 		list_add(&gss_msg->list, &pipe->in_downcall);
354 	} else
355 		gss_msg = old;
356 	spin_unlock(&pipe->lock);
357 	return gss_msg;
358 }
359 
360 static void
361 __gss_unhash_msg(struct gss_upcall_msg *gss_msg)
362 {
363 	list_del_init(&gss_msg->list);
364 	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
365 	wake_up_all(&gss_msg->waitqueue);
366 	refcount_dec(&gss_msg->count);
367 }
368 
369 static void
370 gss_unhash_msg(struct gss_upcall_msg *gss_msg)
371 {
372 	struct rpc_pipe *pipe = gss_msg->pipe;
373 
374 	if (list_empty(&gss_msg->list))
375 		return;
376 	spin_lock(&pipe->lock);
377 	if (!list_empty(&gss_msg->list))
378 		__gss_unhash_msg(gss_msg);
379 	spin_unlock(&pipe->lock);
380 }
381 
382 static void
383 gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
384 {
385 	switch (gss_msg->msg.errno) {
386 	case 0:
387 		if (gss_msg->ctx == NULL)
388 			break;
389 		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
390 		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
391 		break;
392 	case -EKEYEXPIRED:
393 		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
394 	}
395 	gss_cred->gc_upcall_timestamp = jiffies;
396 	gss_cred->gc_upcall = NULL;
397 	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
398 }
399 
400 static void
401 gss_upcall_callback(struct rpc_task *task)
402 {
403 	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
404 			struct gss_cred, gc_base);
405 	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
406 	struct rpc_pipe *pipe = gss_msg->pipe;
407 
408 	spin_lock(&pipe->lock);
409 	gss_handle_downcall_result(gss_cred, gss_msg);
410 	spin_unlock(&pipe->lock);
411 	task->tk_status = gss_msg->msg.errno;
412 	gss_release_msg(gss_msg);
413 }
414 
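/*
 * The legacy (v0) upcall consists of nothing but the uid, in host-endian
 * binary form, translated into the user namespace of the process that
 * opened the pipe.
 */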
415 static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg,
416 			      const struct cred *cred)
417 {
418 	struct user_namespace *userns = cred->user_ns;
419 
420 	uid_t uid = from_kuid_munged(userns, gss_msg->uid);
421 	memcpy(gss_msg->databuf, &uid, sizeof(uid));
422 	gss_msg->msg.data = gss_msg->databuf;
423 	gss_msg->msg.len = sizeof(uid);
424 
425 	BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
426 }
427 
428 static ssize_t
429 gss_v0_upcall(struct file *file, struct rpc_pipe_msg *msg,
430 		char __user *buf, size_t buflen)
431 {
432 	struct gss_upcall_msg *gss_msg = container_of(msg,
433 						      struct gss_upcall_msg,
434 						      msg);
435 	if (msg->copied == 0)
436 		gss_encode_v0_msg(gss_msg, file->f_cred);
437 	return rpc_pipe_generic_upcall(file, msg, buf, buflen);
438 }
439 
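/*
 * The text-based (v1) upcall is a single newline-terminated line of
 * "key=value" pairs.  An illustrative (not verbatim) example:
 *
 *	mech=krb5 uid=1000 target=nfs@server.example.com service=* enctypes=18,17
 */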
440 static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
441 				const char *service_name,
442 				const char *target_name,
443 				const struct cred *cred)
444 {
445 	struct user_namespace *userns = cred->user_ns;
446 	struct gss_api_mech *mech = gss_msg->auth->mech;
447 	char *p = gss_msg->databuf;
448 	size_t buflen = sizeof(gss_msg->databuf);
449 	int len;
450 
451 	len = scnprintf(p, buflen, "mech=%s uid=%d", mech->gm_name,
452 			from_kuid_munged(userns, gss_msg->uid));
453 	buflen -= len;
454 	p += len;
455 	gss_msg->msg.len = len;
456 
457 	/*
458 	 * target= is a full service principal that names the remote
459 	 * identity that we are authenticating to.
460 	 */
461 	if (target_name) {
462 		len = scnprintf(p, buflen, " target=%s", target_name);
463 		buflen -= len;
464 		p += len;
465 		gss_msg->msg.len += len;
466 	}
467 
468 	/*
469 	 * gssd uses service= and srchost= to select a matching key from
470 	 * the system's keytab to use as the source principal.
471 	 *
472 	 * service= is the service name part of the source principal,
473 	 * or "*" (meaning choose any).
474 	 *
475 	 * srchost= is the hostname part of the source principal. When
476 	 * not provided, gssd uses the local hostname.
477 	 */
478 	if (service_name) {
479 		char *c = strchr(service_name, '@');
480 
481 		if (!c)
482 			len = scnprintf(p, buflen, " service=%s",
483 					service_name);
484 		else
485 			len = scnprintf(p, buflen,
486 					" service=%.*s srchost=%s",
487 					(int)(c - service_name),
488 					service_name, c + 1);
489 		buflen -= len;
490 		p += len;
491 		gss_msg->msg.len += len;
492 	}
493 
494 	if (mech->gm_upcall_enctypes) {
495 		len = scnprintf(p, buflen, " enctypes=%s",
496 				mech->gm_upcall_enctypes);
497 		buflen -= len;
498 		p += len;
499 		gss_msg->msg.len += len;
500 	}
501 	trace_rpcgss_upcall_msg(gss_msg->databuf);
502 	len = scnprintf(p, buflen, "\n");
503 	if (len == 0)
504 		goto out_overflow;
505 	gss_msg->msg.len += len;
506 	gss_msg->msg.data = gss_msg->databuf;
507 	return 0;
508 out_overflow:
509 	WARN_ON_ONCE(1);
510 	return -ENOMEM;
511 }
512 
513 static ssize_t
514 gss_v1_upcall(struct file *file, struct rpc_pipe_msg *msg,
515 		char __user *buf, size_t buflen)
516 {
517 	struct gss_upcall_msg *gss_msg = container_of(msg,
518 						      struct gss_upcall_msg,
519 						      msg);
520 	int err;
521 	if (msg->copied == 0) {
522 		err = gss_encode_v1_msg(gss_msg,
523 					gss_msg->service_name,
524 					gss_msg->auth->target_name,
525 					file->f_cred);
526 		if (err)
527 			return err;
528 	}
529 	return rpc_pipe_generic_upcall(file, msg, buf, buflen);
530 }
531 
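/*
 * gss_alloc_msg(): allocate an upcall message for @uid, bound to whichever
 * upcall pipe version gssd currently has open.  Fails with -EAGAIN if no
 * pipe is open yet.
 */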
532 static struct gss_upcall_msg *
533 gss_alloc_msg(struct gss_auth *gss_auth,
534 		kuid_t uid, const char *service_name)
535 {
536 	struct gss_upcall_msg *gss_msg;
537 	int vers;
538 	int err = -ENOMEM;
539 
540 	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
541 	if (gss_msg == NULL)
542 		goto err;
543 	vers = get_pipe_version(gss_auth->net);
544 	err = vers;
545 	if (err < 0)
546 		goto err_free_msg;
547 	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
548 	INIT_LIST_HEAD(&gss_msg->list);
549 	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
550 	init_waitqueue_head(&gss_msg->waitqueue);
551 	refcount_set(&gss_msg->count, 1);
552 	gss_msg->uid = uid;
553 	gss_msg->auth = gss_auth;
554 	kref_get(&gss_auth->kref);
555 	if (service_name) {
556 		gss_msg->service_name = kstrdup_const(service_name, GFP_NOFS);
557 		if (!gss_msg->service_name) {
558 			err = -ENOMEM;
559 			goto err_put_pipe_version;
560 		}
561 	}
562 	return gss_msg;
563 err_put_pipe_version:
564 	put_pipe_version(gss_auth->net);
565 err_free_msg:
566 	kfree(gss_msg);
567 err:
568 	return ERR_PTR(err);
569 }
570 
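/*
 * gss_setup_upcall(): create and queue an upcall for @cred, or return a
 * reference to an identical upcall already in flight for the same uid and
 * service.
 */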
571 static struct gss_upcall_msg *
572 gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
573 {
574 	struct gss_cred *gss_cred = container_of(cred,
575 			struct gss_cred, gc_base);
576 	struct gss_upcall_msg *gss_new, *gss_msg;
577 	kuid_t uid = cred->cr_cred->fsuid;
578 
579 	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
580 	if (IS_ERR(gss_new))
581 		return gss_new;
582 	gss_msg = gss_add_msg(gss_new);
583 	if (gss_msg == gss_new) {
584 		int res;
585 		refcount_inc(&gss_msg->count);
586 		res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
587 		if (res) {
588 			gss_unhash_msg(gss_new);
589 			refcount_dec(&gss_msg->count);
590 			gss_release_msg(gss_new);
591 			gss_msg = ERR_PTR(res);
592 		}
593 	} else
594 		gss_release_msg(gss_new);
595 	return gss_msg;
596 }
597 
598 static void warn_gssd(void)
599 {
600 	dprintk("AUTH_GSS upcall failed. Please check that the user daemon is running.\n");
601 }
602 
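/*
 * gss_refresh_upcall(): asynchronous context refresh.  Queue an upcall if
 * necessary and put the RPC task to sleep until gssd's downcall (or an
 * error) wakes it up.
 */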
603 static inline int
604 gss_refresh_upcall(struct rpc_task *task)
605 {
606 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
607 	struct gss_auth *gss_auth = container_of(cred->cr_auth,
608 			struct gss_auth, rpc_auth);
609 	struct gss_cred *gss_cred = container_of(cred,
610 			struct gss_cred, gc_base);
611 	struct gss_upcall_msg *gss_msg;
612 	struct rpc_pipe *pipe;
613 	int err = 0;
614 
615 	gss_msg = gss_setup_upcall(gss_auth, cred);
616 	if (PTR_ERR(gss_msg) == -EAGAIN) {
617 		/* XXX: warn on the first failure, under the assumption that
618 		 * we shouldn't normally hit this case on a refresh. */
619 		warn_gssd();
620 		rpc_sleep_on_timeout(&pipe_version_rpc_waitqueue,
621 				task, NULL, jiffies + (15 * HZ));
622 		err = -EAGAIN;
623 		goto out;
624 	}
625 	if (IS_ERR(gss_msg)) {
626 		err = PTR_ERR(gss_msg);
627 		goto out;
628 	}
629 	pipe = gss_msg->pipe;
630 	spin_lock(&pipe->lock);
631 	if (gss_cred->gc_upcall != NULL)
632 		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
633 	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
634 		gss_cred->gc_upcall = gss_msg;
635 		/* gss_upcall_callback will release the reference to gss_upcall_msg */
636 		refcount_inc(&gss_msg->count);
637 		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
638 	} else {
639 		gss_handle_downcall_result(gss_cred, gss_msg);
640 		err = gss_msg->msg.errno;
641 	}
642 	spin_unlock(&pipe->lock);
643 	gss_release_msg(gss_msg);
644 out:
645 	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
646 					     cred->cr_cred->fsuid), err);
647 	return err;
648 }
649 
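/*
 * gss_create_upcall(): synchronous counterpart of gss_refresh_upcall(),
 * used by gss_cred_init().  Queues an upcall and waits (killably) for the
 * downcall to supply a context.
 */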
650 static inline int
651 gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
652 {
653 	struct net *net = gss_auth->net;
654 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
655 	struct rpc_pipe *pipe;
656 	struct rpc_cred *cred = &gss_cred->gc_base;
657 	struct gss_upcall_msg *gss_msg;
658 	DEFINE_WAIT(wait);
659 	int err;
660 
661 retry:
662 	err = 0;
663 	/* if gssd is down, just skip upcalling altogether */
664 	if (!gssd_running(net)) {
665 		warn_gssd();
666 		err = -EACCES;
667 		goto out;
668 	}
669 	gss_msg = gss_setup_upcall(gss_auth, cred);
670 	if (PTR_ERR(gss_msg) == -EAGAIN) {
671 		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
672 				sn->pipe_version >= 0, 15 * HZ);
673 		if (sn->pipe_version < 0) {
674 			warn_gssd();
675 			err = -EACCES;
676 		}
677 		if (err < 0)
678 			goto out;
679 		goto retry;
680 	}
681 	if (IS_ERR(gss_msg)) {
682 		err = PTR_ERR(gss_msg);
683 		goto out;
684 	}
685 	pipe = gss_msg->pipe;
686 	for (;;) {
687 		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
688 		spin_lock(&pipe->lock);
689 		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
690 			break;
691 		}
692 		spin_unlock(&pipe->lock);
693 		if (fatal_signal_pending(current)) {
694 			err = -ERESTARTSYS;
695 			goto out_intr;
696 		}
697 		schedule();
698 	}
699 	if (gss_msg->ctx)
700 		gss_cred_set_ctx(cred, gss_msg->ctx);
701 	else
702 		err = gss_msg->msg.errno;
703 	spin_unlock(&pipe->lock);
704 out_intr:
705 	finish_wait(&gss_msg->waitqueue, &wait);
706 	gss_release_msg(gss_msg);
707 out:
708 	trace_rpcgss_upcall_result(from_kuid(&init_user_ns,
709 					     cred->cr_cred->fsuid), err);
710 	return err;
711 }
712 
713 #define MSG_BUF_MAXSIZE 1024
714 
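/*
 * gss_pipe_downcall(): handle a downcall written by gssd.  The buffer
 * begins with the uid the original upcall was made for, followed by the
 * context data that gss_fill_context() parses.
 */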
715 static ssize_t
716 gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
717 {
718 	const void *p, *end;
719 	void *buf;
720 	struct gss_upcall_msg *gss_msg;
721 	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
722 	struct gss_cl_ctx *ctx;
723 	uid_t id;
724 	kuid_t uid;
725 	ssize_t err = -EFBIG;
726 
727 	if (mlen > MSG_BUF_MAXSIZE)
728 		goto out;
729 	err = -ENOMEM;
730 	buf = kmalloc(mlen, GFP_NOFS);
731 	if (!buf)
732 		goto out;
733 
734 	err = -EFAULT;
735 	if (copy_from_user(buf, src, mlen))
736 		goto err;
737 
738 	end = (const void *)((char *)buf + mlen);
739 	p = simple_get_bytes(buf, end, &id, sizeof(id));
740 	if (IS_ERR(p)) {
741 		err = PTR_ERR(p);
742 		goto err;
743 	}
744 
745 	uid = make_kuid(current_user_ns(), id);
746 	if (!uid_valid(uid)) {
747 		err = -EINVAL;
748 		goto err;
749 	}
750 
751 	err = -ENOMEM;
752 	ctx = gss_alloc_context();
753 	if (ctx == NULL)
754 		goto err;
755 
756 	err = -ENOENT;
757 	/* Find a matching upcall */
758 	spin_lock(&pipe->lock);
759 	gss_msg = __gss_find_upcall(pipe, uid, NULL);
760 	if (gss_msg == NULL) {
761 		spin_unlock(&pipe->lock);
762 		goto err_put_ctx;
763 	}
764 	list_del_init(&gss_msg->list);
765 	spin_unlock(&pipe->lock);
766 
767 	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
768 	if (IS_ERR(p)) {
769 		err = PTR_ERR(p);
770 		switch (err) {
771 		case -EACCES:
772 		case -EKEYEXPIRED:
773 			gss_msg->msg.errno = err;
774 			err = mlen;
775 			break;
776 		case -EFAULT:
777 		case -ENOMEM:
778 		case -EINVAL:
779 		case -ENOSYS:
780 			gss_msg->msg.errno = -EAGAIN;
781 			break;
782 		default:
783 			printk(KERN_CRIT "%s: bad return from "
784 				"gss_fill_context: %zd\n", __func__, err);
785 			gss_msg->msg.errno = -EIO;
786 		}
787 		goto err_release_msg;
788 	}
789 	gss_msg->ctx = gss_get_ctx(ctx);
790 	err = mlen;
791 
792 err_release_msg:
793 	spin_lock(&pipe->lock);
794 	__gss_unhash_msg(gss_msg);
795 	spin_unlock(&pipe->lock);
796 	gss_release_msg(gss_msg);
797 err_put_ctx:
798 	gss_put_ctx(ctx);
799 err:
800 	kfree(buf);
801 out:
802 	return err;
803 }
804 
805 static int gss_pipe_open(struct inode *inode, int new_version)
806 {
807 	struct net *net = inode->i_sb->s_fs_info;
808 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
809 	int ret = 0;
810 
811 	spin_lock(&pipe_version_lock);
812 	if (sn->pipe_version < 0) {
813 		/* First open of any gss pipe determines the version: */
814 		sn->pipe_version = new_version;
815 		rpc_wake_up(&pipe_version_rpc_waitqueue);
816 		wake_up(&pipe_version_waitqueue);
817 	} else if (sn->pipe_version != new_version) {
818 		/* Trying to open a pipe of a different version */
819 		ret = -EBUSY;
820 		goto out;
821 	}
822 	atomic_inc(&sn->pipe_users);
823 out:
824 	spin_unlock(&pipe_version_lock);
825 	return ret;
826 
827 }
828 
829 static int gss_pipe_open_v0(struct inode *inode)
830 {
831 	return gss_pipe_open(inode, 0);
832 }
833 
834 static int gss_pipe_open_v1(struct inode *inode)
835 {
836 	return gss_pipe_open(inode, 1);
837 }
838 
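/*
 * gss_pipe_release(): called when userspace (gssd) closes an upcall pipe.
 * Any upcalls still queued are failed with -EPIPE, and the reference on
 * the pipe version is dropped.
 */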
839 static void
840 gss_pipe_release(struct inode *inode)
841 {
842 	struct net *net = inode->i_sb->s_fs_info;
843 	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
844 	struct gss_upcall_msg *gss_msg;
845 
846 restart:
847 	spin_lock(&pipe->lock);
848 	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {
849 
850 		if (!list_empty(&gss_msg->msg.list))
851 			continue;
852 		gss_msg->msg.errno = -EPIPE;
853 		refcount_inc(&gss_msg->count);
854 		__gss_unhash_msg(gss_msg);
855 		spin_unlock(&pipe->lock);
856 		gss_release_msg(gss_msg);
857 		goto restart;
858 	}
859 	spin_unlock(&pipe->lock);
860 
861 	put_pipe_version(net);
862 }
863 
864 static void
865 gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
866 {
867 	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);
868 
869 	if (msg->errno < 0) {
870 		refcount_inc(&gss_msg->count);
871 		gss_unhash_msg(gss_msg);
872 		if (msg->errno == -ETIMEDOUT)
873 			warn_gssd();
874 		gss_release_msg(gss_msg);
875 	}
876 	gss_release_msg(gss_msg);
877 }
878 
879 static void gss_pipe_dentry_destroy(struct dentry *dir,
880 		struct rpc_pipe_dir_object *pdo)
881 {
882 	struct gss_pipe *gss_pipe = pdo->pdo_data;
883 	struct rpc_pipe *pipe = gss_pipe->pipe;
884 
885 	if (pipe->dentry != NULL) {
886 		rpc_unlink(pipe->dentry);
887 		pipe->dentry = NULL;
888 	}
889 }
890 
891 static int gss_pipe_dentry_create(struct dentry *dir,
892 		struct rpc_pipe_dir_object *pdo)
893 {
894 	struct gss_pipe *p = pdo->pdo_data;
895 	struct dentry *dentry;
896 
897 	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
898 	if (IS_ERR(dentry))
899 		return PTR_ERR(dentry);
900 	p->pipe->dentry = dentry;
901 	return 0;
902 }
903 
904 static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
905 	.create = gss_pipe_dentry_create,
906 	.destroy = gss_pipe_dentry_destroy,
907 };
908 
909 static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
910 		const char *name,
911 		const struct rpc_pipe_ops *upcall_ops)
912 {
913 	struct gss_pipe *p;
914 	int err = -ENOMEM;
915 
916 	p = kmalloc(sizeof(*p), GFP_KERNEL);
917 	if (p == NULL)
918 		goto err;
919 	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
920 	if (IS_ERR(p->pipe)) {
921 		err = PTR_ERR(p->pipe);
922 		goto err_free_gss_pipe;
923 	}
924 	p->name = name;
925 	p->clnt = clnt;
926 	kref_init(&p->kref);
927 	rpc_init_pipe_dir_object(&p->pdo,
928 			&gss_pipe_dir_object_ops,
929 			p);
930 	return p;
931 err_free_gss_pipe:
932 	kfree(p);
933 err:
934 	return ERR_PTR(err);
935 }
936 
937 struct gss_alloc_pdo {
938 	struct rpc_clnt *clnt;
939 	const char *name;
940 	const struct rpc_pipe_ops *upcall_ops;
941 };
942 
943 static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
944 {
945 	struct gss_pipe *gss_pipe;
946 	struct gss_alloc_pdo *args = data;
947 
948 	if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
949 		return 0;
950 	gss_pipe = container_of(pdo, struct gss_pipe, pdo);
951 	if (strcmp(gss_pipe->name, args->name) != 0)
952 		return 0;
953 	if (!kref_get_unless_zero(&gss_pipe->kref))
954 		return 0;
955 	return 1;
956 }
957 
958 static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
959 {
960 	struct gss_pipe *gss_pipe;
961 	struct gss_alloc_pdo *args = data;
962 
963 	gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
964 	if (!IS_ERR(gss_pipe))
965 		return &gss_pipe->pdo;
966 	return NULL;
967 }
968 
969 static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
970 		const char *name,
971 		const struct rpc_pipe_ops *upcall_ops)
972 {
973 	struct net *net = rpc_net_ns(clnt);
974 	struct rpc_pipe_dir_object *pdo;
975 	struct gss_alloc_pdo args = {
976 		.clnt = clnt,
977 		.name = name,
978 		.upcall_ops = upcall_ops,
979 	};
980 
981 	pdo = rpc_find_or_alloc_pipe_dir_object(net,
982 			&clnt->cl_pipedir_objects,
983 			gss_pipe_match_pdo,
984 			gss_pipe_alloc_pdo,
985 			&args);
986 	if (pdo != NULL)
987 		return container_of(pdo, struct gss_pipe, pdo);
988 	return ERR_PTR(-ENOMEM);
989 }
990 
991 static void __gss_pipe_free(struct gss_pipe *p)
992 {
993 	struct rpc_clnt *clnt = p->clnt;
994 	struct net *net = rpc_net_ns(clnt);
995 
996 	rpc_remove_pipe_dir_object(net,
997 			&clnt->cl_pipedir_objects,
998 			&p->pdo);
999 	rpc_destroy_pipe_data(p->pipe);
1000 	kfree(p);
1001 }
1002 
1003 static void __gss_pipe_release(struct kref *kref)
1004 {
1005 	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);
1006 
1007 	__gss_pipe_free(p);
1008 }
1009 
1010 static void gss_pipe_free(struct gss_pipe *p)
1011 {
1012 	if (p != NULL)
1013 		kref_put(&p->kref, __gss_pipe_release);
1014 }
1015 
1016 /*
1017  * NOTE: we have the opportunity to use different
1018  * parameters based on the input flavor (which must be a pseudoflavor)
1019  */
1020 static struct gss_auth *
1021 gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1022 {
1023 	rpc_authflavor_t flavor = args->pseudoflavor;
1024 	struct gss_auth *gss_auth;
1025 	struct gss_pipe *gss_pipe;
1026 	struct rpc_auth *auth;
1027 	int err = -ENOMEM; /* XXX? */
1028 
1029 	if (!try_module_get(THIS_MODULE))
1030 		return ERR_PTR(err);
1031 	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
1032 		goto out_dec;
1033 	INIT_HLIST_NODE(&gss_auth->hash);
1034 	gss_auth->target_name = NULL;
1035 	if (args->target_name) {
1036 		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
1037 		if (gss_auth->target_name == NULL)
1038 			goto err_free;
1039 	}
1040 	gss_auth->client = clnt;
1041 	gss_auth->net = get_net(rpc_net_ns(clnt));
1042 	err = -EINVAL;
1043 	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
1044 	if (!gss_auth->mech)
1045 		goto err_put_net;
1046 	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
1047 	if (gss_auth->service == 0)
1048 		goto err_put_mech;
1049 	if (!gssd_running(gss_auth->net))
1050 		goto err_put_mech;
1051 	auth = &gss_auth->rpc_auth;
1052 	auth->au_cslack = GSS_CRED_SLACK >> 2;
1053 	auth->au_rslack = GSS_VERF_SLACK >> 2;
1054 	auth->au_verfsize = GSS_VERF_SLACK >> 2;
1055 	auth->au_ralign = GSS_VERF_SLACK >> 2;
1056 	auth->au_flags = 0;
1057 	auth->au_ops = &authgss_ops;
1058 	auth->au_flavor = flavor;
1059 	if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
1060 		auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
1061 	refcount_set(&auth->au_count, 1);
1062 	kref_init(&gss_auth->kref);
1063 
1064 	err = rpcauth_init_credcache(auth);
1065 	if (err)
1066 		goto err_put_mech;
1067 	/*
1068 	 * Note: if we created the old pipe first, then someone who
1069 	 * examined the directory at the right moment might conclude
1070 	 * that we supported only the old pipe.  So we instead create
1071 	 * the new pipe first.
1072 	 */
1073 	gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
1074 	if (IS_ERR(gss_pipe)) {
1075 		err = PTR_ERR(gss_pipe);
1076 		goto err_destroy_credcache;
1077 	}
1078 	gss_auth->gss_pipe[1] = gss_pipe;
1079 
1080 	gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
1081 			&gss_upcall_ops_v0);
1082 	if (IS_ERR(gss_pipe)) {
1083 		err = PTR_ERR(gss_pipe);
1084 		goto err_destroy_pipe_1;
1085 	}
1086 	gss_auth->gss_pipe[0] = gss_pipe;
1087 
1088 	return gss_auth;
1089 err_destroy_pipe_1:
1090 	gss_pipe_free(gss_auth->gss_pipe[1]);
1091 err_destroy_credcache:
1092 	rpcauth_destroy_credcache(auth);
1093 err_put_mech:
1094 	gss_mech_put(gss_auth->mech);
1095 err_put_net:
1096 	put_net(gss_auth->net);
1097 err_free:
1098 	kfree(gss_auth->target_name);
1099 	kfree(gss_auth);
1100 out_dec:
1101 	module_put(THIS_MODULE);
1102 	trace_rpcgss_createauth(flavor, err);
1103 	return ERR_PTR(err);
1104 }
1105 
1106 static void
1107 gss_free(struct gss_auth *gss_auth)
1108 {
1109 	gss_pipe_free(gss_auth->gss_pipe[0]);
1110 	gss_pipe_free(gss_auth->gss_pipe[1]);
1111 	gss_mech_put(gss_auth->mech);
1112 	put_net(gss_auth->net);
1113 	kfree(gss_auth->target_name);
1114 
1115 	kfree(gss_auth);
1116 	module_put(THIS_MODULE);
1117 }
1118 
1119 static void
1120 gss_free_callback(struct kref *kref)
1121 {
1122 	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);
1123 
1124 	gss_free(gss_auth);
1125 }
1126 
1127 static void
1128 gss_put_auth(struct gss_auth *gss_auth)
1129 {
1130 	kref_put(&gss_auth->kref, gss_free_callback);
1131 }
1132 
1133 static void
1134 gss_destroy(struct rpc_auth *auth)
1135 {
1136 	struct gss_auth *gss_auth = container_of(auth,
1137 			struct gss_auth, rpc_auth);
1138 
1139 	if (hash_hashed(&gss_auth->hash)) {
1140 		spin_lock(&gss_auth_hash_lock);
1141 		hash_del(&gss_auth->hash);
1142 		spin_unlock(&gss_auth_hash_lock);
1143 	}
1144 
1145 	gss_pipe_free(gss_auth->gss_pipe[0]);
1146 	gss_auth->gss_pipe[0] = NULL;
1147 	gss_pipe_free(gss_auth->gss_pipe[1]);
1148 	gss_auth->gss_pipe[1] = NULL;
1149 	rpcauth_destroy_credcache(auth);
1150 
1151 	gss_put_auth(gss_auth);
1152 }
1153 
1154 /*
1155  * Auths may be shared between rpc clients that were cloned from a
1156  * common client with the same xprt, if they also share the flavor and
1157  * target_name.
1158  *
1159  * The auth is looked up from the oldest parent sharing the same
1160  * cl_xprt, and the auth itself references only that common parent
1161  * (which is guaranteed to last as long as any of its descendants).
1162  */
1163 static struct gss_auth *
1164 gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args,
1165 		struct rpc_clnt *clnt,
1166 		struct gss_auth *new)
1167 {
1168 	struct gss_auth *gss_auth;
1169 	unsigned long hashval = (unsigned long)clnt;
1170 
1171 	spin_lock(&gss_auth_hash_lock);
1172 	hash_for_each_possible(gss_auth_hash_table,
1173 			gss_auth,
1174 			hash,
1175 			hashval) {
1176 		if (gss_auth->client != clnt)
1177 			continue;
1178 		if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
1179 			continue;
1180 		if (gss_auth->target_name != args->target_name) {
1181 			if (gss_auth->target_name == NULL)
1182 				continue;
1183 			if (args->target_name == NULL)
1184 				continue;
1185 			if (strcmp(gss_auth->target_name, args->target_name))
1186 				continue;
1187 		}
1188 		if (!refcount_inc_not_zero(&gss_auth->rpc_auth.au_count))
1189 			continue;
1190 		goto out;
1191 	}
1192 	if (new)
1193 		hash_add(gss_auth_hash_table, &new->hash, hashval);
1194 	gss_auth = new;
1195 out:
1196 	spin_unlock(&gss_auth_hash_lock);
1197 	return gss_auth;
1198 }
1199 
1200 static struct gss_auth *
1201 gss_create_hashed(const struct rpc_auth_create_args *args,
1202 		  struct rpc_clnt *clnt)
1203 {
1204 	struct gss_auth *gss_auth;
1205 	struct gss_auth *new;
1206 
1207 	gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
1208 	if (gss_auth != NULL)
1209 		goto out;
1210 	new = gss_create_new(args, clnt);
1211 	if (IS_ERR(new))
1212 		return new;
1213 	gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
1214 	if (gss_auth != new)
1215 		gss_destroy(&new->rpc_auth);
1216 out:
1217 	return gss_auth;
1218 }
1219 
1220 static struct rpc_auth *
1221 gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
1222 {
1223 	struct gss_auth *gss_auth;
1224 	struct rpc_xprt_switch *xps = rcu_access_pointer(clnt->cl_xpi.xpi_xpswitch);
1225 
1226 	while (clnt != clnt->cl_parent) {
1227 		struct rpc_clnt *parent = clnt->cl_parent;
1228 		/* Find the original parent for this transport */
1229 		if (rcu_access_pointer(parent->cl_xpi.xpi_xpswitch) != xps)
1230 			break;
1231 		clnt = parent;
1232 	}
1233 
1234 	gss_auth = gss_create_hashed(args, clnt);
1235 	if (IS_ERR(gss_auth))
1236 		return ERR_CAST(gss_auth);
1237 	return &gss_auth->rpc_auth;
1238 }
1239 
1240 static struct gss_cred *
1241 gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
1242 {
1243 	struct gss_cred *new;
1244 
1245 	/* Make a copy of the cred so that we can reference count it */
1246 	new = kzalloc(sizeof(*gss_cred), GFP_NOFS);
1247 	if (new) {
1248 		struct auth_cred acred = {
1249 			.cred = gss_cred->gc_base.cr_cred,
1250 		};
1251 		struct gss_cl_ctx *ctx =
1252 			rcu_dereference_protected(gss_cred->gc_ctx, 1);
1253 
1254 		rpcauth_init_cred(&new->gc_base, &acred,
1255 				&gss_auth->rpc_auth,
1256 				&gss_nullops);
1257 		new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
1258 		new->gc_service = gss_cred->gc_service;
1259 		new->gc_principal = gss_cred->gc_principal;
1260 		kref_get(&gss_auth->kref);
1261 		rcu_assign_pointer(new->gc_ctx, ctx);
1262 		gss_get_ctx(ctx);
1263 	}
1264 	return new;
1265 }
1266 
1267 /*
1268  * gss_send_destroy_context will cause the RPCSEC_GSS client to send a NULL RPC call
1269  * to the server with the GSS control procedure field set to
1270  * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
1271  * all RPCSEC_GSS state associated with that context.
1272  */
1273 static void
1274 gss_send_destroy_context(struct rpc_cred *cred)
1275 {
1276 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1277 	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1278 	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1279 	struct gss_cred *new;
1280 	struct rpc_task *task;
1281 
1282 	new = gss_dup_cred(gss_auth, gss_cred);
1283 	if (new) {
1284 		ctx->gc_proc = RPC_GSS_PROC_DESTROY;
1285 
1286 		task = rpc_call_null(gss_auth->client, &new->gc_base,
1287 				RPC_TASK_ASYNC|RPC_TASK_SOFT);
1288 		if (!IS_ERR(task))
1289 			rpc_put_task(task);
1290 
1291 		put_rpccred(&new->gc_base);
1292 	}
1293 }
1294 
1295 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
1296  * to create a new cred or context, so they check that things have been
1297  * allocated before freeing them. */
1298 static void
1299 gss_do_free_ctx(struct gss_cl_ctx *ctx)
1300 {
1301 	gss_delete_sec_context(&ctx->gc_gss_ctx);
1302 	kfree(ctx->gc_wire_ctx.data);
1303 	kfree(ctx->gc_acceptor.data);
1304 	kfree(ctx);
1305 }
1306 
1307 static void
1308 gss_free_ctx_callback(struct rcu_head *head)
1309 {
1310 	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
1311 	gss_do_free_ctx(ctx);
1312 }
1313 
1314 static void
1315 gss_free_ctx(struct gss_cl_ctx *ctx)
1316 {
1317 	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
1318 }
1319 
1320 static void
1321 gss_free_cred(struct gss_cred *gss_cred)
1322 {
1323 	kfree(gss_cred);
1324 }
1325 
1326 static void
1327 gss_free_cred_callback(struct rcu_head *head)
1328 {
1329 	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
1330 	gss_free_cred(gss_cred);
1331 }
1332 
1333 static void
1334 gss_destroy_nullcred(struct rpc_cred *cred)
1335 {
1336 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1337 	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
1338 	struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
1339 
1340 	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
1341 	put_cred(cred->cr_cred);
1342 	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
1343 	if (ctx)
1344 		gss_put_ctx(ctx);
1345 	gss_put_auth(gss_auth);
1346 }
1347 
1348 static void
1349 gss_destroy_cred(struct rpc_cred *cred)
1350 {
1351 
1352 	if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
1353 		gss_send_destroy_context(cred);
1354 	gss_destroy_nullcred(cred);
1355 }
1356 
1357 static int
1358 gss_hash_cred(struct auth_cred *acred, unsigned int hashbits)
1359 {
1360 	return hash_64(from_kuid(&init_user_ns, acred->cred->fsuid), hashbits);
1361 }
1362 
1363 /*
1364  * Look up the RPCSEC_GSS cred for the current process
1365  */
1366 static struct rpc_cred *
1367 gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
1368 {
1369 	return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
1370 }
1371 
1372 static struct rpc_cred *
1373 gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
1374 {
1375 	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1376 	struct gss_cred	*cred = NULL;
1377 	int err = -ENOMEM;
1378 
1379 	if (!(cred = kzalloc(sizeof(*cred), gfp)))
1380 		goto out_err;
1381 
1382 	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
1383 	/*
1384 	 * Note: in order to force a call to call_refresh(), we deliberately
1385 	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
1386 	 */
1387 	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
1388 	cred->gc_service = gss_auth->service;
1389 	cred->gc_principal = acred->principal;
1390 	kref_get(&gss_auth->kref);
1391 	return &cred->gc_base;
1392 
1393 out_err:
1394 	return ERR_PTR(err);
1395 }
1396 
1397 static int
1398 gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
1399 {
1400 	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
1401 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1402 	int err;
1403 
1404 	do {
1405 		err = gss_create_upcall(gss_auth, gss_cred);
1406 	} while (err == -EAGAIN);
1407 	return err;
1408 }
1409 
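/*
 * gss_stringify_acceptor(): return a kmalloc'd, NUL-terminated copy of the
 * acceptor name recorded in the cred's current context, or NULL.  If the
 * context is replaced while we are unlocked, retry with a larger buffer.
 */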
1410 static char *
1411 gss_stringify_acceptor(struct rpc_cred *cred)
1412 {
1413 	char *string = NULL;
1414 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1415 	struct gss_cl_ctx *ctx;
1416 	unsigned int len;
1417 	struct xdr_netobj *acceptor;
1418 
1419 	rcu_read_lock();
1420 	ctx = rcu_dereference(gss_cred->gc_ctx);
1421 	if (!ctx)
1422 		goto out;
1423 
1424 	len = ctx->gc_acceptor.len;
1425 	rcu_read_unlock();
1426 
1427 	/* no point if there's no string */
1428 	if (!len)
1429 		return NULL;
1430 realloc:
1431 	string = kmalloc(len + 1, GFP_KERNEL);
1432 	if (!string)
1433 		return NULL;
1434 
1435 	rcu_read_lock();
1436 	ctx = rcu_dereference(gss_cred->gc_ctx);
1437 
1438 	/* did the ctx disappear or was it replaced by one with no acceptor? */
1439 	if (!ctx || !ctx->gc_acceptor.len) {
1440 		kfree(string);
1441 		string = NULL;
1442 		goto out;
1443 	}
1444 
1445 	acceptor = &ctx->gc_acceptor;
1446 
1447 	/*
1448 	 * Did we find a new acceptor that's longer than the original? Allocate
1449 	 * a longer buffer and try again.
1450 	 */
1451 	if (len < acceptor->len) {
1452 		len = acceptor->len;
1453 		rcu_read_unlock();
1454 		kfree(string);
1455 		goto realloc;
1456 	}
1457 
1458 	memcpy(string, acceptor->data, acceptor->len);
1459 	string[acceptor->len] = '\0';
1460 out:
1461 	rcu_read_unlock();
1462 	return string;
1463 }
1464 
1465 /*
1466  * Returns -EACCES if GSS context is NULL or will expire within the
1467  * timeout (in seconds)
1468  */
1469 static int
1470 gss_key_timeout(struct rpc_cred *rc)
1471 {
1472 	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1473 	struct gss_cl_ctx *ctx;
1474 	unsigned long timeout = jiffies + (gss_key_expire_timeo * HZ);
1475 	int ret = 0;
1476 
1477 	rcu_read_lock();
1478 	ctx = rcu_dereference(gss_cred->gc_ctx);
1479 	if (!ctx || time_after(timeout, ctx->gc_expiry))
1480 		ret = -EACCES;
1481 	rcu_read_unlock();
1482 
1483 	return ret;
1484 }
1485 
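/*
 * gss_match(): decide whether the cached cred @rc can be used for @acred.
 * Expired contexts and principal mismatches disqualify a cred; otherwise
 * the fsuid must match.
 */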
1486 static int
1487 gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
1488 {
1489 	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
1490 	struct gss_cl_ctx *ctx;
1491 	int ret;
1492 
1493 	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
1494 		goto out;
1495 	/* Don't match with creds that have expired. */
1496 	rcu_read_lock();
1497 	ctx = rcu_dereference(gss_cred->gc_ctx);
1498 	if (!ctx || time_after(jiffies, ctx->gc_expiry)) {
1499 		rcu_read_unlock();
1500 		return 0;
1501 	}
1502 	rcu_read_unlock();
1503 	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
1504 		return 0;
1505 out:
1506 	if (acred->principal != NULL) {
1507 		if (gss_cred->gc_principal == NULL)
1508 			return 0;
1509 		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
1510 	} else {
1511 		if (gss_cred->gc_principal != NULL)
1512 			return 0;
1513 		ret = uid_eq(rc->cr_cred->fsuid, acred->cred->fsuid);
1514 	}
1515 	return ret;
1516 }
1517 
1518 /*
1519  * Marshal credentials.
1520  *
1521  * The expensive part is computing the verifier. We can't cache a
1522  * pre-computed version of the verifier because the seqno, which
1523  * is different every time, is included in the MIC.
1524  */
1525 static int gss_marshal(struct rpc_task *task, struct xdr_stream *xdr)
1526 {
1527 	struct rpc_rqst *req = task->tk_rqstp;
1528 	struct rpc_cred *cred = req->rq_cred;
1529 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1530 						 gc_base);
1531 	struct gss_cl_ctx	*ctx = gss_cred_get_ctx(cred);
1532 	__be32		*p, *cred_len;
1533 	u32             maj_stat = 0;
1534 	struct xdr_netobj mic;
1535 	struct kvec	iov;
1536 	struct xdr_buf	verf_buf;
1537 	int status;
1538 
1539 	/* Credential */
1540 
1541 	p = xdr_reserve_space(xdr, 7 * sizeof(*p) +
1542 			      ctx->gc_wire_ctx.len);
1543 	if (!p)
1544 		goto marshal_failed;
1545 	*p++ = rpc_auth_gss;
1546 	cred_len = p++;
1547 
1548 	spin_lock(&ctx->gc_seq_lock);
1549 	req->rq_seqno = (ctx->gc_seq < MAXSEQ) ? ctx->gc_seq++ : MAXSEQ;
1550 	spin_unlock(&ctx->gc_seq_lock);
1551 	if (req->rq_seqno == MAXSEQ)
1552 		goto expired;
1553 	trace_rpcgss_seqno(task);
1554 
1555 	*p++ = cpu_to_be32(RPC_GSS_VERSION);
1556 	*p++ = cpu_to_be32(ctx->gc_proc);
1557 	*p++ = cpu_to_be32(req->rq_seqno);
1558 	*p++ = cpu_to_be32(gss_cred->gc_service);
1559 	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
1560 	*cred_len = cpu_to_be32((p - (cred_len + 1)) << 2);
1561 
1562 	/* Verifier */
1563 
1564 	/* We compute the checksum for the verifier over the xdr-encoded bytes
1565 	 * starting with the xid and ending at the end of the credential: */
1566 	iov.iov_base = req->rq_snd_buf.head[0].iov_base;
1567 	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
1568 	xdr_buf_from_iov(&iov, &verf_buf);
1569 
1570 	p = xdr_reserve_space(xdr, sizeof(*p));
1571 	if (!p)
1572 		goto marshal_failed;
1573 	*p++ = rpc_auth_gss;
1574 	mic.data = (u8 *)(p + 1);
1575 	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1576 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1577 		goto expired;
1578 	else if (maj_stat != 0)
1579 		goto bad_mic;
1580 	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1581 		goto marshal_failed;
1582 	status = 0;
1583 out:
1584 	gss_put_ctx(ctx);
1585 	return status;
1586 expired:
1587 	clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1588 	status = -EKEYEXPIRED;
1589 	goto out;
1590 marshal_failed:
1591 	status = -EMSGSIZE;
1592 	goto out;
1593 bad_mic:
1594 	trace_rpcgss_get_mic(task, maj_stat);
1595 	status = -EIO;
1596 	goto out;
1597 }
1598 
1599 static int gss_renew_cred(struct rpc_task *task)
1600 {
1601 	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
1602 	struct gss_cred *gss_cred = container_of(oldcred,
1603 						 struct gss_cred,
1604 						 gc_base);
1605 	struct rpc_auth *auth = oldcred->cr_auth;
1606 	struct auth_cred acred = {
1607 		.cred = oldcred->cr_cred,
1608 		.principal = gss_cred->gc_principal,
1609 	};
1610 	struct rpc_cred *new;
1611 
1612 	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
1613 	if (IS_ERR(new))
1614 		return PTR_ERR(new);
1615 	task->tk_rqstp->rq_cred = new;
1616 	put_rpccred(oldcred);
1617 	return 0;
1618 }
1619 
1620 static int gss_cred_is_negative_entry(struct rpc_cred *cred)
1621 {
1622 	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
1623 		unsigned long now = jiffies;
1624 		unsigned long begin, expire;
1625 		struct gss_cred *gss_cred;
1626 
1627 		gss_cred = container_of(cred, struct gss_cred, gc_base);
1628 		begin = gss_cred->gc_upcall_timestamp;
1629 		expire = begin + gss_expired_cred_retry_delay * HZ;
1630 
1631 		if (time_in_range_open(now, begin, expire))
1632 			return 1;
1633 	}
1634 	return 0;
1635 }
1636 
1637 /*
1638  * Refresh credentials. XXX - finish
1639  */
1640 static int
1641 gss_refresh(struct rpc_task *task)
1642 {
1643 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1644 	int ret = 0;
1645 
1646 	if (gss_cred_is_negative_entry(cred))
1647 		return -EKEYEXPIRED;
1648 
1649 	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
1650 			!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
1651 		ret = gss_renew_cred(task);
1652 		if (ret < 0)
1653 			goto out;
1654 		cred = task->tk_rqstp->rq_cred;
1655 	}
1656 
1657 	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
1658 		ret = gss_refresh_upcall(task);
1659 out:
1660 	return ret;
1661 }
1662 
1663 /* Dummy refresh routine: used only when destroying the context */
1664 static int
1665 gss_refresh_null(struct rpc_task *task)
1666 {
1667 	return 0;
1668 }
1669 
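/*
 * gss_validate(): check the RPCSEC_GSS verifier in a reply by verifying
 * the MIC the server computed over our request sequence number.
 */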
1670 static int
1671 gss_validate(struct rpc_task *task, struct xdr_stream *xdr)
1672 {
1673 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1674 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1675 	__be32		*p, *seq = NULL;
1676 	struct kvec	iov;
1677 	struct xdr_buf	verf_buf;
1678 	struct xdr_netobj mic;
1679 	u32		len, maj_stat;
1680 	int		status;
1681 
1682 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1683 	if (!p)
1684 		goto validate_failed;
1685 	if (*p++ != rpc_auth_gss)
1686 		goto validate_failed;
1687 	len = be32_to_cpup(p);
1688 	if (len > RPC_MAX_AUTH_SIZE)
1689 		goto validate_failed;
1690 	p = xdr_inline_decode(xdr, len);
1691 	if (!p)
1692 		goto validate_failed;
1693 
1694 	seq = kmalloc(4, GFP_NOFS);
1695 	if (!seq)
1696 		goto validate_failed;
1697 	*seq = cpu_to_be32(task->tk_rqstp->rq_seqno);
1698 	iov.iov_base = seq;
1699 	iov.iov_len = 4;
1700 	xdr_buf_from_iov(&iov, &verf_buf);
1701 	mic.data = (u8 *)p;
1702 	mic.len = len;
1703 	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
1704 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1705 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1706 	if (maj_stat)
1707 		goto bad_mic;
1708 
1709 	/* We leave it to unwrap to calculate au_rslack. For now we just
1710 	 * calculate the length of the verifier: */
1711 	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
1712 	status = 0;
1713 out:
1714 	gss_put_ctx(ctx);
1715 	kfree(seq);
1716 	return status;
1717 
1718 validate_failed:
1719 	status = -EIO;
1720 	goto out;
1721 bad_mic:
1722 	trace_rpcgss_verify_mic(task, maj_stat);
1723 	status = -EACCES;
1724 	goto out;
1725 }
1726 
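/*
 * rpc_gss_svc_integrity: emit "length | seqno | args", then append a MIC
 * computed over the sequence number and the encoded arguments.
 */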
1727 static int gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1728 			      struct rpc_task *task, struct xdr_stream *xdr)
1729 {
1730 	struct rpc_rqst *rqstp = task->tk_rqstp;
1731 	struct xdr_buf integ_buf, *snd_buf = &rqstp->rq_snd_buf;
1732 	struct xdr_netobj mic;
1733 	__be32 *p, *integ_len;
1734 	u32 offset, maj_stat;
1735 
1736 	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1737 	if (!p)
1738 		goto wrap_failed;
1739 	integ_len = p++;
1740 	*p = cpu_to_be32(rqstp->rq_seqno);
1741 
1742 	if (rpcauth_wrap_req_encode(task, xdr))
1743 		goto wrap_failed;
1744 
1745 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1746 	if (xdr_buf_subsegment(snd_buf, &integ_buf,
1747 				offset, snd_buf->len - offset))
1748 		goto wrap_failed;
1749 	*integ_len = cpu_to_be32(integ_buf.len);
1750 
1751 	p = xdr_reserve_space(xdr, 0);
1752 	if (!p)
1753 		goto wrap_failed;
1754 	mic.data = (u8 *)(p + 1);
1755 	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1756 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1757 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1758 	else if (maj_stat)
1759 		goto bad_mic;
1760 	/* Check that the trailing MIC fit in the buffer, after the fact */
1761 	if (xdr_stream_encode_opaque_inline(xdr, (void **)&p, mic.len) < 0)
1762 		goto wrap_failed;
1763 	return 0;
1764 wrap_failed:
1765 	return -EMSGSIZE;
1766 bad_mic:
1767 	trace_rpcgss_get_mic(task, maj_stat);
1768 	return -EIO;
1769 }
1770 
1771 static void
1772 priv_release_snd_buf(struct rpc_rqst *rqstp)
1773 {
1774 	int i;
1775 
1776 	for (i = 0; i < rqstp->rq_enc_pages_num; i++)
1777 		__free_page(rqstp->rq_enc_pages[i]);
1778 	kfree(rqstp->rq_enc_pages);
1779 	rqstp->rq_release_snd_buf = NULL;
1780 }
1781 
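/*
 * alloc_enc_pages(): allocate a parallel set of pages for gss_wrap() to
 * write encrypted data into, so the caller's own pages (for example, page
 * cache pages backing a WRITE) are presumably left untouched.
 */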
1782 static int
1783 alloc_enc_pages(struct rpc_rqst *rqstp)
1784 {
1785 	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1786 	int first, last, i;
1787 
1788 	if (rqstp->rq_release_snd_buf)
1789 		rqstp->rq_release_snd_buf(rqstp);
1790 
1791 	if (snd_buf->page_len == 0) {
1792 		rqstp->rq_enc_pages_num = 0;
1793 		return 0;
1794 	}
1795 
1796 	first = snd_buf->page_base >> PAGE_SHIFT;
1797 	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT;
1798 	rqstp->rq_enc_pages_num = last - first + 1 + 1;
1799 	rqstp->rq_enc_pages
1800 		= kmalloc_array(rqstp->rq_enc_pages_num,
1801 				sizeof(struct page *),
1802 				GFP_NOFS);
1803 	if (!rqstp->rq_enc_pages)
1804 		goto out;
1805 	for (i = 0; i < rqstp->rq_enc_pages_num; i++) {
1806 		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
1807 		if (rqstp->rq_enc_pages[i] == NULL)
1808 			goto out_free;
1809 	}
1810 	rqstp->rq_release_snd_buf = priv_release_snd_buf;
1811 	return 0;
1812 out_free:
1813 	rqstp->rq_enc_pages_num = i;
1814 	priv_release_snd_buf(rqstp);
1815 out:
1816 	return -EAGAIN;
1817 }
1818 
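/*
 * rpc_gss_svc_privacy: encode the arguments, swap in the freshly allocated
 * encryption pages for gss_wrap() to write the ciphertext into, then pad
 * the result to a 4-byte boundary.
 */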
1819 static int gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1820 			     struct rpc_task *task, struct xdr_stream *xdr)
1821 {
1822 	struct rpc_rqst *rqstp = task->tk_rqstp;
1823 	struct xdr_buf	*snd_buf = &rqstp->rq_snd_buf;
1824 	u32		pad, offset, maj_stat;
1825 	int		status;
1826 	__be32		*p, *opaque_len;
1827 	struct page	**inpages;
1828 	int		first;
1829 	struct kvec	*iov;
1830 
1831 	status = -EIO;
1832 	p = xdr_reserve_space(xdr, 2 * sizeof(*p));
1833 	if (!p)
1834 		goto wrap_failed;
1835 	opaque_len = p++;
1836 	*p = cpu_to_be32(rqstp->rq_seqno);
1837 
1838 	if (rpcauth_wrap_req_encode(task, xdr))
1839 		goto wrap_failed;
1840 
1841 	status = alloc_enc_pages(rqstp);
1842 	if (unlikely(status))
1843 		goto wrap_failed;
1844 	first = snd_buf->page_base >> PAGE_SHIFT;
1845 	inpages = snd_buf->pages + first;
1846 	snd_buf->pages = rqstp->rq_enc_pages;
1847 	snd_buf->page_base -= first << PAGE_SHIFT;
1848 	/*
1849 	 * Move the tail into its own page, in case gss_wrap needs
1850 	 * more space in the head when wrapping.
1851 	 *
1852 	 * Still... Why can't gss_wrap just slide the tail down?
1853 	 */
1854 	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
1855 		char *tmp;
1856 
1857 		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
1858 		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
1859 		snd_buf->tail[0].iov_base = tmp;
1860 	}
1861 	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1862 	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
1863 	/* slack space should prevent this ever happening: */
1864 	if (unlikely(snd_buf->len > snd_buf->buflen))
1865 		goto wrap_failed;
1866 	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
1867 	 * done anyway, so it's safe to put the request on the wire: */
1868 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1869 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1870 	else if (maj_stat)
1871 		goto bad_wrap;
1872 
1873 	*opaque_len = cpu_to_be32(snd_buf->len - offset);
1874 	/* guess whether the pad goes into the head or the tail: */
1875 	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
1876 		iov = snd_buf->tail;
1877 	else
1878 		iov = snd_buf->head;
1879 	p = iov->iov_base + iov->iov_len;
1880 	pad = 3 - ((snd_buf->len - offset - 1) & 3);
1881 	memset(p, 0, pad);
1882 	iov->iov_len += pad;
1883 	snd_buf->len += pad;
1884 
1885 	return 0;
1886 wrap_failed:
1887 	return status;
1888 bad_wrap:
1889 	trace_rpcgss_wrap(task, maj_stat);
1890 	return -EIO;
1891 }
1892 
1893 static int gss_wrap_req(struct rpc_task *task, struct xdr_stream *xdr)
1894 {
1895 	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1896 	struct gss_cred	*gss_cred = container_of(cred, struct gss_cred,
1897 			gc_base);
1898 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
1899 	int status;
1900 
1901 	status = -EIO;
1902 	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
1903 		/* The spec seems a little ambiguous here, but I think that not
1904 		 * wrapping context destruction requests makes the most sense.
1905 		 */
1906 		status = rpcauth_wrap_req_encode(task, xdr);
1907 		goto out;
1908 	}
1909 	switch (gss_cred->gc_service) {
1910 	case RPC_GSS_SVC_NONE:
1911 		status = rpcauth_wrap_req_encode(task, xdr);
1912 		break;
1913 	case RPC_GSS_SVC_INTEGRITY:
1914 		status = gss_wrap_req_integ(cred, ctx, task, xdr);
1915 		break;
1916 	case RPC_GSS_SVC_PRIVACY:
1917 		status = gss_wrap_req_priv(cred, ctx, task, xdr);
1918 		break;
1919 	default:
1920 		status = -EIO;
1921 	}
1922 out:
1923 	gss_put_ctx(ctx);
1924 	return status;
1925 }
1926 
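/*
 * gss_unwrap_resp_auth:
 * RPC_GSS_SVC_NONE replies carry no GSS framing beyond the verifier,
 * so the reply slack and alignment are just the verifier size.
 */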
1927 static int
1928 gss_unwrap_resp_auth(struct rpc_cred *cred)
1929 {
1930 	struct rpc_auth *auth = cred->cr_auth;
1931 
1932 	auth->au_rslack = auth->au_verfsize;
1933 	auth->au_ralign = auth->au_verfsize;
1934 	return 0;
1935 }
1936 
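/*
 * gss_unwrap_resp_integ:
 * RPC_GSS_SVC_INTEGRITY replies carry the length and sequence number,
 * the reply data, and a MIC computed over that region.  Check the
 * sequence number, verify the MIC, and record how much of the receive
 * buffer the GSS framing consumes in au_rslack/au_ralign.
 */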
1937 static int
1938 gss_unwrap_resp_integ(struct rpc_task *task, struct rpc_cred *cred,
1939 		      struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
1940 		      struct xdr_stream *xdr)
1941 {
1942 	struct xdr_buf integ_buf, *rcv_buf = &rqstp->rq_rcv_buf;
1943 	u32 data_offset, mic_offset, integ_len, maj_stat;
1944 	struct rpc_auth *auth = cred->cr_auth;
1945 	struct xdr_netobj mic;
1946 	__be32 *p;
1947 
1948 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1949 	if (unlikely(!p))
1950 		goto unwrap_failed;
1951 	integ_len = be32_to_cpup(p++);
1952 	if (integ_len & 3)
1953 		goto unwrap_failed;
1954 	data_offset = (u8 *)(p) - (u8 *)rcv_buf->head[0].iov_base;
1955 	mic_offset = integ_len + data_offset;
1956 	if (mic_offset > rcv_buf->len)
1957 		goto unwrap_failed;
1958 	if (be32_to_cpup(p) != rqstp->rq_seqno)
1959 		goto bad_seqno;
1960 
1961 	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset, integ_len))
1962 		goto unwrap_failed;
1963 	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
1964 		goto unwrap_failed;
1965 	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
1966 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
1967 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
1968 	if (maj_stat != GSS_S_COMPLETE)
1969 		goto bad_mic;
1970 
1971 	auth->au_rslack = auth->au_verfsize + 2 + 1 + XDR_QUADLEN(mic.len);
1972 	auth->au_ralign = auth->au_verfsize + 2;
1973 	return 0;
1974 unwrap_failed:
1975 	trace_rpcgss_unwrap_failed(task);
1976 	return -EIO;
1977 bad_seqno:
1978 	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(p));
1979 	return -EIO;
1980 bad_mic:
1981 	trace_rpcgss_verify_mic(task, maj_stat);
1982 	return -EIO;
1983 }
1984 
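/*
 * gss_unwrap_resp_priv:
 * RPC_GSS_SVC_PRIVACY replies carry a single opaque, encrypted blob.
 * gss_unwrap() decrypts it in place; the exposed sequence number is then
 * checked and the XDR stream re-initialized, because gss_unwrap() has
 * shortened the receive buffer.
 */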
1985 static int
1986 gss_unwrap_resp_priv(struct rpc_task *task, struct rpc_cred *cred,
1987 		     struct gss_cl_ctx *ctx, struct rpc_rqst *rqstp,
1988 		     struct xdr_stream *xdr)
1989 {
1990 	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
1991 	struct kvec *head = rqstp->rq_rcv_buf.head;
1992 	struct rpc_auth *auth = cred->cr_auth;
1993 	unsigned int savedlen = rcv_buf->len;
1994 	u32 offset, opaque_len, maj_stat;
1995 	__be32 *p;
1996 
1997 	p = xdr_inline_decode(xdr, 2 * sizeof(*p));
1998 	if (unlikely(!p))
1999 		goto unwrap_failed;
2000 	opaque_len = be32_to_cpup(p++);
2001 	offset = (u8 *)(p) - (u8 *)head->iov_base;
2002 	if (offset + opaque_len > rcv_buf->len)
2003 		goto unwrap_failed;
2004 	rcv_buf->len = offset + opaque_len;
2005 
2006 	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
2007 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
2008 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
2009 	if (maj_stat != GSS_S_COMPLETE)
2010 		goto bad_unwrap;
2011 	/* gss_unwrap decrypted the sequence number */
2012 	if (be32_to_cpup(p++) != rqstp->rq_seqno)
2013 		goto bad_seqno;
2014 
2015 	/* gss_unwrap redacts the opaque blob from the head iovec.
2016 	 * rcv_buf has changed, thus the stream needs to be reset.
2017 	 */
2018 	xdr_init_decode(xdr, rcv_buf, p, rqstp);
2019 
2020 	auth->au_rslack = auth->au_verfsize + 2 +
2021 			  XDR_QUADLEN(savedlen - rcv_buf->len);
2022 	auth->au_ralign = auth->au_verfsize + 2 +
2023 			  XDR_QUADLEN(savedlen - rcv_buf->len);
2024 	return 0;
2025 unwrap_failed:
2026 	trace_rpcgss_unwrap_failed(task);
2027 	return -EIO;
2028 bad_seqno:
2029 	trace_rpcgss_bad_seqno(task, rqstp->rq_seqno, be32_to_cpup(--p));
2030 	return -EIO;
2031 bad_unwrap:
2032 	trace_rpcgss_unwrap(task, maj_stat);
2033 	return -EIO;
2034 }
2035 
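/*
 * gss_seq_is_newer:
 * Return true if @new is logically later than @old; the signed
 * subtraction makes the comparison robust against 32-bit sequence
 * number wrap-around.
 */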
2036 static bool
2037 gss_seq_is_newer(u32 new, u32 old)
2038 {
2039 	return (s32)(new - old) > 0;
2040 }
2041 
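/*
 * gss_xmit_need_reencode:
 * Called before (re)transmission.  gc_seq_xmit tracks the newest
 * sequence number handed to the transport; if this request's sequence
 * number has fallen more than the sequence window behind it (or the
 * context is gone), the request must be re-encoded with a fresh
 * sequence number.
 */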
2042 static bool
2043 gss_xmit_need_reencode(struct rpc_task *task)
2044 {
2045 	struct rpc_rqst *req = task->tk_rqstp;
2046 	struct rpc_cred *cred = req->rq_cred;
2047 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2048 	u32 win, seq_xmit = 0;
2049 	bool ret = true;
2050 
2051 	if (!ctx)
2052 		goto out;
2053 
2054 	if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
2055 		goto out_ctx;
2056 
2057 	seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
2058 	while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
2059 		u32 tmp = seq_xmit;
2060 
2061 		seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
2062 		if (seq_xmit == tmp) {
2063 			ret = false;
2064 			goto out_ctx;
2065 		}
2066 	}
2067 
2068 	win = ctx->gc_win;
2069 	if (win > 0)
2070 		ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
2071 
2072 out_ctx:
2073 	gss_put_ctx(ctx);
2074 out:
2075 	trace_rpcgss_need_reencode(task, seq_xmit, ret);
2076 	return ret;
2077 }
2078 
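/*
 * gss_unwrap_resp:
 * Strip the GSS framing from the reply according to the credential's
 * service level, then decode the reply body proper.
 */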
2079 static int
2080 gss_unwrap_resp(struct rpc_task *task, struct xdr_stream *xdr)
2081 {
2082 	struct rpc_rqst *rqstp = task->tk_rqstp;
2083 	struct rpc_cred *cred = rqstp->rq_cred;
2084 	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
2085 			gc_base);
2086 	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
2087 	int status = -EIO;
2088 
2089 	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
2090 		goto out_decode;
2091 	switch (gss_cred->gc_service) {
2092 	case RPC_GSS_SVC_NONE:
2093 		status = gss_unwrap_resp_auth(cred);
2094 		break;
2095 	case RPC_GSS_SVC_INTEGRITY:
2096 		status = gss_unwrap_resp_integ(task, cred, ctx, rqstp, xdr);
2097 		break;
2098 	case RPC_GSS_SVC_PRIVACY:
2099 		status = gss_unwrap_resp_priv(task, cred, ctx, rqstp, xdr);
2100 		break;
2101 	}
2102 	if (status)
2103 		goto out;
2104 
2105 out_decode:
2106 	status = rpcauth_unwrap_resp_decode(task, xdr);
2107 out:
2108 	gss_put_ctx(ctx);
2109 	return status;
2110 }
2111 
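/*
 * Method tables hooked into the generic RPC auth layer: authgss_ops is
 * the flavor-level interface, gss_credops the full per-credential
 * interface, and gss_nullops a trimmed variant paired with the
 * gss_refresh_null/gss_destroy_nullcred credential path.
 */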
2112 static const struct rpc_authops authgss_ops = {
2113 	.owner		= THIS_MODULE,
2114 	.au_flavor	= RPC_AUTH_GSS,
2115 	.au_name	= "RPCSEC_GSS",
2116 	.create		= gss_create,
2117 	.destroy	= gss_destroy,
2118 	.hash_cred	= gss_hash_cred,
2119 	.lookup_cred	= gss_lookup_cred,
2120 	.crcreate	= gss_create_cred,
2121 	.list_pseudoflavors = gss_mech_list_pseudoflavors,
2122 	.info2flavor	= gss_mech_info2flavor,
2123 	.flavor2info	= gss_mech_flavor2info,
2124 };
2125 
2126 static const struct rpc_credops gss_credops = {
2127 	.cr_name		= "AUTH_GSS",
2128 	.crdestroy		= gss_destroy_cred,
2129 	.cr_init		= gss_cred_init,
2130 	.crmatch		= gss_match,
2131 	.crmarshal		= gss_marshal,
2132 	.crrefresh		= gss_refresh,
2133 	.crvalidate		= gss_validate,
2134 	.crwrap_req		= gss_wrap_req,
2135 	.crunwrap_resp		= gss_unwrap_resp,
2136 	.crkey_timeout		= gss_key_timeout,
2137 	.crstringify_acceptor	= gss_stringify_acceptor,
2138 	.crneed_reencode	= gss_xmit_need_reencode,
2139 };
2140 
2141 static const struct rpc_credops gss_nullops = {
2142 	.cr_name		= "AUTH_GSS",
2143 	.crdestroy		= gss_destroy_nullcred,
2144 	.crmatch		= gss_match,
2145 	.crmarshal		= gss_marshal,
2146 	.crrefresh		= gss_refresh_null,
2147 	.crvalidate		= gss_validate,
2148 	.crwrap_req		= gss_wrap_req,
2149 	.crunwrap_resp		= gss_unwrap_resp,
2150 	.crstringify_acceptor	= gss_stringify_acceptor,
2151 };
2152 
2153 static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
2154 	.upcall		= gss_v0_upcall,
2155 	.downcall	= gss_pipe_downcall,
2156 	.destroy_msg	= gss_pipe_destroy_msg,
2157 	.open_pipe	= gss_pipe_open_v0,
2158 	.release_pipe	= gss_pipe_release,
2159 };
2160 
2161 static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
2162 	.upcall		= gss_v1_upcall,
2163 	.downcall	= gss_pipe_downcall,
2164 	.destroy_msg	= gss_pipe_destroy_msg,
2165 	.open_pipe	= gss_pipe_open_v1,
2166 	.release_pipe	= gss_pipe_release,
2167 };
2168 
2169 static __net_init int rpcsec_gss_init_net(struct net *net)
2170 {
2171 	return gss_svc_init_net(net);
2172 }
2173 
2174 static __net_exit void rpcsec_gss_exit_net(struct net *net)
2175 {
2176 	gss_svc_shutdown_net(net);
2177 }
2178 
2179 static struct pernet_operations rpcsec_gss_net_ops = {
2180 	.init = rpcsec_gss_init_net,
2181 	.exit = rpcsec_gss_exit_net,
2182 };
2183 
2184 /*
2185  * Initialize RPCSEC_GSS module
2186  */
2187 static int __init init_rpcsec_gss(void)
2188 {
2189 	int err = 0;
2190 
2191 	err = rpcauth_register(&authgss_ops);
2192 	if (err)
2193 		goto out;
2194 	err = gss_svc_init();
2195 	if (err)
2196 		goto out_unregister;
2197 	err = register_pernet_subsys(&rpcsec_gss_net_ops);
2198 	if (err)
2199 		goto out_svc_exit;
2200 	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
2201 	return 0;
2202 out_svc_exit:
2203 	gss_svc_shutdown();
2204 out_unregister:
2205 	rpcauth_unregister(&authgss_ops);
2206 out:
2207 	return err;
2208 }
2209 
2210 static void __exit exit_rpcsec_gss(void)
2211 {
2212 	unregister_pernet_subsys(&rpcsec_gss_net_ops);
2213 	gss_svc_shutdown();
2214 	rpcauth_unregister(&authgss_ops);
2215 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
2216 }
2217 
2218 MODULE_ALIAS("rpc-auth-6");
2219 MODULE_LICENSE("GPL");
2220 module_param_named(expired_cred_retry_delay,
2221 		   gss_expired_cred_retry_delay,
2222 		   uint, 0644);
2223 MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
2224 		"the RPC engine retries an expired credential");
2225 
2226 module_param_named(key_expire_timeo,
2227 		   gss_key_expire_timeo,
2228 		   uint, 0644);
2229 MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
2230 		"credential key's lifetime during which the NFS layer cleans up "
2231 		"prior to key expiration");
2232 
2233 module_init(init_rpcsec_gss)
2234 module_exit(exit_rpcsec_gss)
2235