xref: /openbmc/linux/net/sunrpc/svcauth_unix.c (revision 6181b0c6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/types.h>
3 #include <linux/sched.h>
4 #include <linux/module.h>
5 #include <linux/sunrpc/types.h>
6 #include <linux/sunrpc/xdr.h>
7 #include <linux/sunrpc/svcsock.h>
8 #include <linux/sunrpc/svcauth.h>
9 #include <linux/sunrpc/gss_api.h>
10 #include <linux/sunrpc/addr.h>
11 #include <linux/err.h>
12 #include <linux/seq_file.h>
13 #include <linux/hash.h>
14 #include <linux/string.h>
15 #include <linux/slab.h>
16 #include <net/sock.h>
17 #include <net/ipv6.h>
18 #include <linux/kernel.h>
19 #include <linux/user_namespace.h>
20 #define RPCDBG_FACILITY	RPCDBG_AUTH
21 
22 
23 #include "netns.h"
24 
25 /*
26  * AUTHUNIX and AUTHNULL credentials are both handled here.
27  * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
28  * are always nobody (-2).  In particular, we do the same IP address
29  * checks for AUTHNULL as for AUTHUNIX, and those checks are done here.
30  */
31 
32 
33 struct unix_domain {
34 	struct auth_domain	h;
35 	/* other stuff later */
36 };
37 
38 extern struct auth_ops svcauth_null;
39 extern struct auth_ops svcauth_unix;
40 extern struct auth_ops svcauth_tls;
41 
42 static void svcauth_unix_domain_release_rcu(struct rcu_head *head)
43 {
44 	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
45 	struct unix_domain *ud = container_of(dom, struct unix_domain, h);
46 
47 	kfree(dom->name);
48 	kfree(ud);
49 }
50 
51 static void svcauth_unix_domain_release(struct auth_domain *dom)
52 {
53 	call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu);
54 }
55 
56 struct auth_domain *unix_domain_find(char *name)
57 {
58 	struct auth_domain *rv;
59 	struct unix_domain *new = NULL;
60 
61 	rv = auth_domain_find(name);
62 	while (1) {
63 		if (rv) {
64 			if (new && rv != &new->h)
65 				svcauth_unix_domain_release(&new->h);
66 
67 			if (rv->flavour != &svcauth_unix) {
68 				auth_domain_put(rv);
69 				return NULL;
70 			}
71 			return rv;
72 		}
73 
74 		new = kmalloc(sizeof(*new), GFP_KERNEL);
75 		if (new == NULL)
76 			return NULL;
77 		kref_init(&new->h.ref);
78 		new->h.name = kstrdup(name, GFP_KERNEL);
79 		if (new->h.name == NULL) {
80 			kfree(new);
81 			return NULL;
82 		}
83 		new->h.flavour = &svcauth_unix;
84 		rv = auth_domain_lookup(name, &new->h);
85 	}
86 }
87 EXPORT_SYMBOL_GPL(unix_domain_find);
88 
89 
90 /**************************************************
91  * cache mapping an IP address to a unix_domain,
92  * as needed by AUTH_UNIX
93  */
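/*
 * Cache channel sketch (illustrative values only; the request format
 * comes from ip_map_request(), the downcall fields from ip_map_parse(),
 * and the content listing from ip_map_show()):
 *
 *   upcall request:   "nfsd 192.0.2.1\n"
 *   downcall reply:   "nfsd 192.0.2.1 <expiry> [domainname]\n"
 *   content listing:  "#class IP domain"
 *
 * Omitting the domainname in the downcall marks the entry NEGATIVE;
 * the expiry field is interpreted by get_expiry().
 */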
94 #define	IP_HASHBITS	8
95 #define	IP_HASHMAX	(1<<IP_HASHBITS)
96 
97 struct ip_map {
98 	struct cache_head	h;
99 	char			m_class[8]; /* e.g. "nfsd" */
100 	struct in6_addr		m_addr;
101 	struct unix_domain	*m_client;
102 	struct rcu_head		m_rcu;
103 };
104 
105 static void ip_map_put(struct kref *kref)
106 {
107 	struct cache_head *item = container_of(kref, struct cache_head, ref);
108 	struct ip_map *im = container_of(item, struct ip_map, h);
109 
110 	if (test_bit(CACHE_VALID, &item->flags) &&
111 	    !test_bit(CACHE_NEGATIVE, &item->flags))
112 		auth_domain_put(&im->m_client->h);
113 	kfree_rcu(im, m_rcu);
114 }
115 
116 static inline int hash_ip6(const struct in6_addr *ip)
117 {
118 	return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
119 }
120 static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
121 {
122 	struct ip_map *orig = container_of(corig, struct ip_map, h);
123 	struct ip_map *new = container_of(cnew, struct ip_map, h);
124 	return strcmp(orig->m_class, new->m_class) == 0 &&
125 	       ipv6_addr_equal(&orig->m_addr, &new->m_addr);
126 }
127 static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
128 {
129 	struct ip_map *new = container_of(cnew, struct ip_map, h);
130 	struct ip_map *item = container_of(citem, struct ip_map, h);
131 
132 	strcpy(new->m_class, item->m_class);
133 	new->m_addr = item->m_addr;
134 }
135 static void update(struct cache_head *cnew, struct cache_head *citem)
136 {
137 	struct ip_map *new = container_of(cnew, struct ip_map, h);
138 	struct ip_map *item = container_of(citem, struct ip_map, h);
139 
140 	kref_get(&item->m_client->h.ref);
141 	new->m_client = item->m_client;
142 }
143 static struct cache_head *ip_map_alloc(void)
144 {
145 	struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
146 	if (i)
147 		return &i->h;
148 	else
149 		return NULL;
150 }
151 
152 static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
153 {
154 	return sunrpc_cache_pipe_upcall(cd, h);
155 }
156 
157 static void ip_map_request(struct cache_detail *cd,
158 				  struct cache_head *h,
159 				  char **bpp, int *blen)
160 {
161 	char text_addr[40];
162 	struct ip_map *im = container_of(h, struct ip_map, h);
163 
164 	if (ipv6_addr_v4mapped(&(im->m_addr))) {
165 		snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
166 	} else {
167 		snprintf(text_addr, 40, "%pI6", &im->m_addr);
168 	}
169 	qword_add(bpp, blen, im->m_class);
170 	qword_add(bpp, blen, text_addr);
171 	(*bpp)[-1] = '\n';
172 }
173 
174 static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
175 static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time64_t expiry);
176 
177 static int ip_map_parse(struct cache_detail *cd,
178 			  char *mesg, int mlen)
179 {
180 	/* class ipaddress expiry [domainname] */
181 	/* should be safe just to use the start of the input buffer
182 	 * for scratch: */
183 	char *buf = mesg;
184 	int len;
185 	char class[8];
186 	union {
187 		struct sockaddr		sa;
188 		struct sockaddr_in	s4;
189 		struct sockaddr_in6	s6;
190 	} address;
191 	struct sockaddr_in6 sin6;
192 	int err;
193 
194 	struct ip_map *ipmp;
195 	struct auth_domain *dom;
196 	time64_t expiry;
197 
198 	if (mesg[mlen-1] != '\n')
199 		return -EINVAL;
200 	mesg[mlen-1] = 0;
201 
202 	/* class */
203 	len = qword_get(&mesg, class, sizeof(class));
204 	if (len <= 0) return -EINVAL;
205 
206 	/* ip address */
207 	len = qword_get(&mesg, buf, mlen);
208 	if (len <= 0) return -EINVAL;
209 
210 	if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0)
211 		return -EINVAL;
212 	switch (address.sa.sa_family) {
213 	case AF_INET:
214 		/* Form a mapped IPv4 address in sin6 */
215 		sin6.sin6_family = AF_INET6;
216 		ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
217 				&sin6.sin6_addr);
218 		break;
219 #if IS_ENABLED(CONFIG_IPV6)
220 	case AF_INET6:
221 		memcpy(&sin6, &address.s6, sizeof(sin6));
222 		break;
223 #endif
224 	default:
225 		return -EINVAL;
226 	}
227 
228 	expiry = get_expiry(&mesg);
229 	if (expiry == 0)
230 		return -EINVAL;
231 
232 	/* domainname, or empty for NEGATIVE */
233 	len = qword_get(&mesg, buf, mlen);
234 	if (len < 0) return -EINVAL;
235 
236 	if (len) {
237 		dom = unix_domain_find(buf);
238 		if (dom == NULL)
239 			return -ENOENT;
240 	} else
241 		dom = NULL;
242 
243 	/* IPv6 scope IDs are ignored for now */
244 	ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
245 	if (ipmp) {
246 		err = __ip_map_update(cd, ipmp,
247 			     container_of(dom, struct unix_domain, h),
248 			     expiry);
249 	} else
250 		err = -ENOMEM;
251 
252 	if (dom)
253 		auth_domain_put(dom);
254 
255 	cache_flush();
256 	return err;
257 }
258 
259 static int ip_map_show(struct seq_file *m,
260 		       struct cache_detail *cd,
261 		       struct cache_head *h)
262 {
263 	struct ip_map *im;
264 	struct in6_addr addr;
265 	char *dom = "-no-domain-";
266 
267 	if (h == NULL) {
268 		seq_puts(m, "#class IP domain\n");
269 		return 0;
270 	}
271 	im = container_of(h, struct ip_map, h);
272 	/* class addr domain */
273 	addr = im->m_addr;
274 
275 	if (test_bit(CACHE_VALID, &h->flags) &&
276 	    !test_bit(CACHE_NEGATIVE, &h->flags))
277 		dom = im->m_client->h.name;
278 
279 	if (ipv6_addr_v4mapped(&addr)) {
280 		seq_printf(m, "%s %pI4 %s\n",
281 			im->m_class, &addr.s6_addr32[3], dom);
282 	} else {
283 		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
284 	}
285 	return 0;
286 }
287 
288 
289 static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
290 		struct in6_addr *addr)
291 {
292 	struct ip_map ip;
293 	struct cache_head *ch;
294 
295 	strcpy(ip.m_class, class);
296 	ip.m_addr = *addr;
297 	ch = sunrpc_cache_lookup_rcu(cd, &ip.h,
298 				     hash_str(class, IP_HASHBITS) ^
299 				     hash_ip6(addr));
300 
301 	if (ch)
302 		return container_of(ch, struct ip_map, h);
303 	else
304 		return NULL;
305 }
306 
307 static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
308 		struct unix_domain *udom, time64_t expiry)
309 {
310 	struct ip_map ip;
311 	struct cache_head *ch;
312 
313 	ip.m_client = udom;
314 	ip.h.flags = 0;
315 	if (!udom)
316 		set_bit(CACHE_NEGATIVE, &ip.h.flags);
317 	ip.h.expiry_time = expiry;
318 	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
319 				 hash_str(ipm->m_class, IP_HASHBITS) ^
320 				 hash_ip6(&ipm->m_addr));
321 	if (!ch)
322 		return -ENOMEM;
323 	cache_put(ch, cd);
324 	return 0;
325 }
326 
327 void svcauth_unix_purge(struct net *net)
328 {
329 	struct sunrpc_net *sn;
330 
331 	sn = net_generic(net, sunrpc_net_id);
332 	cache_purge(sn->ip_map_cache);
333 }
334 EXPORT_SYMBOL_GPL(svcauth_unix_purge);
335 
336 static inline struct ip_map *
337 ip_map_cached_get(struct svc_xprt *xprt)
338 {
339 	struct ip_map *ipm = NULL;
340 	struct sunrpc_net *sn;
341 
342 	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
343 		spin_lock(&xprt->xpt_lock);
344 		ipm = xprt->xpt_auth_cache;
345 		if (ipm != NULL) {
346 			sn = net_generic(xprt->xpt_net, sunrpc_net_id);
347 			if (cache_is_expired(sn->ip_map_cache, &ipm->h)) {
348 				/*
349 				 * The entry has been invalidated since it was
350 				 * remembered, e.g. by a second mount from the
351 				 * same IP address.
352 				 */
353 				xprt->xpt_auth_cache = NULL;
354 				spin_unlock(&xprt->xpt_lock);
355 				cache_put(&ipm->h, sn->ip_map_cache);
356 				return NULL;
357 			}
358 			cache_get(&ipm->h);
359 		}
360 		spin_unlock(&xprt->xpt_lock);
361 	}
362 	return ipm;
363 }
364 
365 static inline void
366 ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
367 {
368 	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
369 		spin_lock(&xprt->xpt_lock);
370 		if (xprt->xpt_auth_cache == NULL) {
371 			/* newly cached, keep the reference */
372 			xprt->xpt_auth_cache = ipm;
373 			ipm = NULL;
374 		}
375 		spin_unlock(&xprt->xpt_lock);
376 	}
377 	if (ipm) {
378 		struct sunrpc_net *sn;
379 
380 		sn = net_generic(xprt->xpt_net, sunrpc_net_id);
381 		cache_put(&ipm->h, sn->ip_map_cache);
382 	}
383 }
384 
385 void
386 svcauth_unix_info_release(struct svc_xprt *xpt)
387 {
388 	struct ip_map *ipm;
389 
390 	ipm = xpt->xpt_auth_cache;
391 	if (ipm != NULL) {
392 		struct sunrpc_net *sn;
393 
394 		sn = net_generic(xpt->xpt_net, sunrpc_net_id);
395 		cache_put(&ipm->h, sn->ip_map_cache);
396 	}
397 }
398 
399 /****************************************************************************
400  * auth.unix.gid cache
401  * simple cache mapping a UID to its full list of GIDs,
402  * needed because AUTH_UNIX aka AUTH_SYS can carry at most UNX_NGROUPS groups
403  */
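/*
 * Cache channel sketch (illustrative values only; the request format
 * comes from unix_gid_request(), the downcall fields from
 * unix_gid_parse(), and the content listing from unix_gid_show()):
 *
 *   upcall request:   "1000\n"
 *   downcall reply:   "1000 <expiry> 3 4 24 1000\n"  (uid expiry Ngid gids...)
 *   content listing:  "#uid cnt: gids..."
 */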
404 #define	GID_HASHBITS	8
405 #define	GID_HASHMAX	(1<<GID_HASHBITS)
406 
407 struct unix_gid {
408 	struct cache_head	h;
409 	kuid_t			uid;
410 	struct group_info	*gi;
411 	struct rcu_head		rcu;
412 };
413 
414 static int unix_gid_hash(kuid_t uid)
415 {
416 	return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
417 }
418 
419 static void unix_gid_put(struct kref *kref)
420 {
421 	struct cache_head *item = container_of(kref, struct cache_head, ref);
422 	struct unix_gid *ug = container_of(item, struct unix_gid, h);
423 	if (test_bit(CACHE_VALID, &item->flags) &&
424 	    !test_bit(CACHE_NEGATIVE, &item->flags))
425 		put_group_info(ug->gi);
426 	kfree_rcu(ug, rcu);
427 }
428 
429 static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
430 {
431 	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
432 	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
433 	return uid_eq(orig->uid, new->uid);
434 }
435 static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
436 {
437 	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
438 	struct unix_gid *item = container_of(citem, struct unix_gid, h);
439 	new->uid = item->uid;
440 }
441 static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
442 {
443 	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
444 	struct unix_gid *item = container_of(citem, struct unix_gid, h);
445 
446 	get_group_info(item->gi);
447 	new->gi = item->gi;
448 }
449 static struct cache_head *unix_gid_alloc(void)
450 {
451 	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
452 	if (g)
453 		return &g->h;
454 	else
455 		return NULL;
456 }
457 
458 static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
459 {
460 	return sunrpc_cache_pipe_upcall_timeout(cd, h);
461 }
462 
463 static void unix_gid_request(struct cache_detail *cd,
464 			     struct cache_head *h,
465 			     char **bpp, int *blen)
466 {
467 	char tuid[20];
468 	struct unix_gid *ug = container_of(h, struct unix_gid, h);
469 
470 	snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid));
471 	qword_add(bpp, blen, tuid);
472 	(*bpp)[-1] = '\n';
473 }
474 
475 static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
476 
477 static int unix_gid_parse(struct cache_detail *cd,
478 			char *mesg, int mlen)
479 {
480 	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
481 	int id;
482 	kuid_t uid;
483 	int gids;
484 	int rv;
485 	int i;
486 	int err;
487 	time64_t expiry;
488 	struct unix_gid ug, *ugp;
489 
490 	if (mesg[mlen - 1] != '\n')
491 		return -EINVAL;
492 	mesg[mlen-1] = 0;
493 
494 	rv = get_int(&mesg, &id);
495 	if (rv)
496 		return -EINVAL;
497 	uid = make_kuid(current_user_ns(), id);
498 	ug.uid = uid;
499 
500 	expiry = get_expiry(&mesg);
501 	if (expiry == 0)
502 		return -EINVAL;
503 
504 	rv = get_int(&mesg, &gids);
505 	if (rv || gids < 0 || gids > 8192)
506 		return -EINVAL;
507 
508 	ug.gi = groups_alloc(gids);
509 	if (!ug.gi)
510 		return -ENOMEM;
511 
512 	for (i = 0; i < gids; i++) {
513 		int gid;
514 		kgid_t kgid;
515 		rv = get_int(&mesg, &gid);
516 		err = -EINVAL;
517 		if (rv)
518 			goto out;
519 		kgid = make_kgid(current_user_ns(), gid);
520 		if (!gid_valid(kgid))
521 			goto out;
522 		ug.gi->gid[i] = kgid;
523 	}
524 
525 	groups_sort(ug.gi);
526 	ugp = unix_gid_lookup(cd, uid);
527 	if (ugp) {
528 		struct cache_head *ch;
529 		ug.h.flags = 0;
530 		ug.h.expiry_time = expiry;
531 		ch = sunrpc_cache_update(cd,
532 					 &ug.h, &ugp->h,
533 					 unix_gid_hash(uid));
534 		if (!ch)
535 			err = -ENOMEM;
536 		else {
537 			err = 0;
538 			cache_put(ch, cd);
539 		}
540 	} else
541 		err = -ENOMEM;
542  out:
543 	if (ug.gi)
544 		put_group_info(ug.gi);
545 	return err;
546 }
547 
548 static int unix_gid_show(struct seq_file *m,
549 			 struct cache_detail *cd,
550 			 struct cache_head *h)
551 {
552 	struct user_namespace *user_ns = m->file->f_cred->user_ns;
553 	struct unix_gid *ug;
554 	int i;
555 	int glen;
556 
557 	if (h == NULL) {
558 		seq_puts(m, "#uid cnt: gids...\n");
559 		return 0;
560 	}
561 	ug = container_of(h, struct unix_gid, h);
562 	if (test_bit(CACHE_VALID, &h->flags) &&
563 	    !test_bit(CACHE_NEGATIVE, &h->flags))
564 		glen = ug->gi->ngroups;
565 	else
566 		glen = 0;
567 
568 	seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
569 	for (i = 0; i < glen; i++)
570 		seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i]));
571 	seq_printf(m, "\n");
572 	return 0;
573 }
574 
575 static const struct cache_detail unix_gid_cache_template = {
576 	.owner		= THIS_MODULE,
577 	.hash_size	= GID_HASHMAX,
578 	.name		= "auth.unix.gid",
579 	.cache_put	= unix_gid_put,
580 	.cache_upcall	= unix_gid_upcall,
581 	.cache_request	= unix_gid_request,
582 	.cache_parse	= unix_gid_parse,
583 	.cache_show	= unix_gid_show,
584 	.match		= unix_gid_match,
585 	.init		= unix_gid_init,
586 	.update		= unix_gid_update,
587 	.alloc		= unix_gid_alloc,
588 };
589 
590 int unix_gid_cache_create(struct net *net)
591 {
592 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
593 	struct cache_detail *cd;
594 	int err;
595 
596 	cd = cache_create_net(&unix_gid_cache_template, net);
597 	if (IS_ERR(cd))
598 		return PTR_ERR(cd);
599 	err = cache_register_net(cd, net);
600 	if (err) {
601 		cache_destroy_net(cd, net);
602 		return err;
603 	}
604 	sn->unix_gid_cache = cd;
605 	return 0;
606 }
607 
608 void unix_gid_cache_destroy(struct net *net)
609 {
610 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
611 	struct cache_detail *cd = sn->unix_gid_cache;
612 
613 	sn->unix_gid_cache = NULL;
614 	cache_purge(cd);
615 	cache_unregister_net(cd, net);
616 	cache_destroy_net(cd, net);
617 }
618 
619 static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
620 {
621 	struct unix_gid ug;
622 	struct cache_head *ch;
623 
624 	ug.uid = uid;
625 	ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid));
626 	if (ch)
627 		return container_of(ch, struct unix_gid, h);
628 	else
629 		return NULL;
630 }
631 
632 static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
633 {
634 	struct unix_gid *ug;
635 	struct group_info *gi;
636 	int ret;
637 	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net,
638 					    sunrpc_net_id);
639 
640 	ug = unix_gid_lookup(sn->unix_gid_cache, uid);
641 	if (!ug)
642 		return ERR_PTR(-EAGAIN);
643 	ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle);
644 	switch (ret) {
645 	case -ENOENT:
646 		return ERR_PTR(-ENOENT);
647 	case -ETIMEDOUT:
648 		return ERR_PTR(-ESHUTDOWN);
649 	case 0:
650 		gi = get_group_info(ug->gi);
651 		cache_put(&ug->h, sn->unix_gid_cache);
652 		return gi;
653 	default:
654 		return ERR_PTR(-EAGAIN);
655 	}
656 }
657 
658 int
659 svcauth_unix_set_client(struct svc_rqst *rqstp)
660 {
661 	struct sockaddr_in *sin;
662 	struct sockaddr_in6 *sin6, sin6_storage;
663 	struct ip_map *ipm;
664 	struct group_info *gi;
665 	struct svc_cred *cred = &rqstp->rq_cred;
666 	struct svc_xprt *xprt = rqstp->rq_xprt;
667 	struct net *net = xprt->xpt_net;
668 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
669 
670 	switch (rqstp->rq_addr.ss_family) {
671 	case AF_INET:
672 		sin = svc_addr_in(rqstp);
673 		sin6 = &sin6_storage;
674 		ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
675 		break;
676 	case AF_INET6:
677 		sin6 = svc_addr_in6(rqstp);
678 		break;
679 	default:
680 		BUG();
681 	}
682 
683 	rqstp->rq_client = NULL;
684 	if (rqstp->rq_proc == 0)
685 		goto out;
686 
687 	rqstp->rq_auth_stat = rpc_autherr_badcred;
688 	ipm = ip_map_cached_get(xprt);
689 	if (ipm == NULL)
690 		ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
691 				    &sin6->sin6_addr);
692 
693 	if (ipm == NULL)
694 		return SVC_DENIED;
695 
696 	switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
697 	default:
698 		BUG();
699 	case -ETIMEDOUT:
700 		return SVC_CLOSE;
701 	case -EAGAIN:
702 		return SVC_DROP;
703 	case -ENOENT:
704 		return SVC_DENIED;
705 	case 0:
706 		rqstp->rq_client = &ipm->m_client->h;
707 		kref_get(&rqstp->rq_client->ref);
708 		ip_map_cached_put(xprt, ipm);
709 		break;
710 	}
711 
712 	gi = unix_gid_find(cred->cr_uid, rqstp);
713 	switch (PTR_ERR(gi)) {
714 	case -EAGAIN:
715 		return SVC_DROP;
716 	case -ESHUTDOWN:
717 		return SVC_CLOSE;
718 	case -ENOENT:
719 		break;
720 	default:
721 		put_group_info(cred->cr_group_info);
722 		cred->cr_group_info = gi;
723 	}
724 
725 out:
726 	rqstp->rq_auth_stat = rpc_auth_ok;
727 	return SVC_OK;
728 }
729 
730 EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
731 
732 /**
733  * svcauth_null_accept - Decode and validate incoming RPC_AUTH_NULL credential
734  * @rqstp: RPC transaction
735  *
736  * Return values:
737  *   %SVC_OK: Both credential and verifier are valid
738  *   %SVC_DENIED: Credential or verifier is not valid
739  *   %SVC_GARBAGE: Failed to decode credential or verifier
740  *   %SVC_CLOSE: Temporary failure
741  *
742  * rqstp->rq_auth_stat is set as mandated by RFC 5531.
743  */
744 static int
745 svcauth_null_accept(struct svc_rqst *rqstp)
746 {
747 	struct kvec	*resv = &rqstp->rq_res.head[0];
748 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
749 	struct svc_cred	*cred = &rqstp->rq_cred;
750 	u32 flavor, len;
751 	void *body;
752 
753 	svcxdr_init_decode(rqstp);
754 
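	/*
	 * An AUTH_NULL call is expected to carry an empty credential body
	 * and an AUTH_NULL verifier with an empty body; the length checks
	 * below enforce exactly that.
	 */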
755 	/* Length of Call's credential body field: */
756 	if (xdr_stream_decode_u32(xdr, &len) < 0)
757 		return SVC_GARBAGE;
758 	if (len != 0) {
759 		rqstp->rq_auth_stat = rpc_autherr_badcred;
760 		return SVC_DENIED;
761 	}
762 
763 	/* Call's verf field: */
764 	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
765 		return SVC_GARBAGE;
766 	if (flavor != RPC_AUTH_NULL || len != 0) {
767 		rqstp->rq_auth_stat = rpc_autherr_badverf;
768 		return SVC_DENIED;
769 	}
770 
771 	/* Signal that mapping to nobody uid/gid is required */
772 	cred->cr_uid = INVALID_UID;
773 	cred->cr_gid = INVALID_GID;
774 	cred->cr_group_info = groups_alloc(0);
775 	if (cred->cr_group_info == NULL)
776 		return SVC_CLOSE; /* kmalloc failure - client must retry */
777 
778 	/* Put NULL verifier */
779 	svc_putnl(resv, RPC_AUTH_NULL);
780 	svc_putnl(resv, 0);
781 
782 	rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
783 	return SVC_OK;
784 }
785 
786 static int
787 svcauth_null_release(struct svc_rqst *rqstp)
788 {
789 	if (rqstp->rq_client)
790 		auth_domain_put(rqstp->rq_client);
791 	rqstp->rq_client = NULL;
792 	if (rqstp->rq_cred.cr_group_info)
793 		put_group_info(rqstp->rq_cred.cr_group_info);
794 	rqstp->rq_cred.cr_group_info = NULL;
795 
796 	return 0; /* don't drop */
797 }
798 
799 
800 struct auth_ops svcauth_null = {
801 	.name		= "null",
802 	.owner		= THIS_MODULE,
803 	.flavour	= RPC_AUTH_NULL,
804 	.accept 	= svcauth_null_accept,
805 	.release	= svcauth_null_release,
806 	.set_client	= svcauth_unix_set_client,
807 };
808 
809 
810 static int
811 svcauth_tls_accept(struct svc_rqst *rqstp)
812 {
813 	struct svc_cred	*cred = &rqstp->rq_cred;
814 	struct kvec *argv = rqstp->rq_arg.head;
815 	struct kvec *resv = rqstp->rq_res.head;
816 
817 	if (argv->iov_len < XDR_UNIT * 3)
818 		return SVC_GARBAGE;
819 
820 	/* Call's cred length */
821 	if (svc_getu32(argv) != xdr_zero) {
822 		rqstp->rq_auth_stat = rpc_autherr_badcred;
823 		return SVC_DENIED;
824 	}
825 
826 	/* Call's verifier flavor and its length */
827 	if (svc_getu32(argv) != rpc_auth_null ||
828 	    svc_getu32(argv) != xdr_zero) {
829 		rqstp->rq_auth_stat = rpc_autherr_badverf;
830 		return SVC_DENIED;
831 	}
832 
833 	/* AUTH_TLS is not valid on non-NULL procedures */
834 	if (rqstp->rq_proc != 0) {
835 		rqstp->rq_auth_stat = rpc_autherr_badcred;
836 		return SVC_DENIED;
837 	}
838 
839 	/* Mapping to nobody uid/gid is required */
840 	cred->cr_uid = INVALID_UID;
841 	cred->cr_gid = INVALID_GID;
842 	cred->cr_group_info = groups_alloc(0);
843 	if (cred->cr_group_info == NULL)
844 		return SVC_CLOSE; /* kmalloc failure - client must retry */
845 
846 	/* Reply's verifier */
847 	svc_putnl(resv, RPC_AUTH_NULL);
848 	if (rqstp->rq_xprt->xpt_ops->xpo_start_tls) {
849 		svc_putnl(resv, 8);
850 		memcpy(resv->iov_base + resv->iov_len, "STARTTLS", 8);
851 		resv->iov_len += 8;
852 	} else
853 		svc_putnl(resv, 0);
854 
855 	rqstp->rq_cred.cr_flavor = RPC_AUTH_TLS;
856 	svcxdr_init_decode(rqstp);
857 	return SVC_OK;
858 }
859 
860 struct auth_ops svcauth_tls = {
861 	.name		= "tls",
862 	.owner		= THIS_MODULE,
863 	.flavour	= RPC_AUTH_TLS,
864 	.accept 	= svcauth_tls_accept,
865 	.release	= svcauth_null_release,
866 	.set_client	= svcauth_unix_set_client,
867 };
868 
869 
870 /**
871  * svcauth_unix_accept - Decode and validate incoming RPC_AUTH_SYS credential
872  * @rqstp: RPC transaction
873  *
874  * Return values:
875  *   %SVC_OK: Both credential and verifier are valid
876  *   %SVC_DENIED: Credential or verifier is not valid
877  *   %SVC_GARBAGE: Failed to decode credential or verifier
878  *   %SVC_CLOSE: Temporary failure
879  *
880  * rqstp->rq_auth_stat is set as mandated by RFC 5531.
881  */
882 static int
883 svcauth_unix_accept(struct svc_rqst *rqstp)
884 {
885 	struct kvec	*resv = &rqstp->rq_res.head[0];
886 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
887 	struct svc_cred	*cred = &rqstp->rq_cred;
888 	struct user_namespace *userns;
889 	u32 flavor, len, i;
890 	void *body;
891 	__be32 *p;
892 
893 	svcxdr_init_decode(rqstp);
894 
895 	/*
896 	 * This implementation ignores the length of the Call's
897 	 * credential body field and the timestamp and machinename
898 	 * fields.
899 	 */
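	/*
	 * For reference, RFC 5531 defines the AUTH_SYS credential body as
	 *
	 *	struct authsys_parms {
	 *		unsigned int stamp;
	 *		string machinename<255>;
	 *		unsigned int uid;
	 *		unsigned int gid;
	 *		unsigned int gids<16>;
	 *	};
	 *
	 * The three words decoded here are the credential body length, the
	 * stamp, and the machinename length; the machinename bytes are then
	 * skipped without being parsed.
	 */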
900 	p = xdr_inline_decode(xdr, XDR_UNIT * 3);
901 	if (!p)
902 		return SVC_GARBAGE;
903 	len = be32_to_cpup(p + 2);
904 	if (len > RPC_MAX_MACHINENAME)
905 		return SVC_GARBAGE;
906 	if (!xdr_inline_decode(xdr, len))
907 		return SVC_GARBAGE;
908 
909 	/*
910 	 * Note: we skip uid_valid()/gid_valid() checks here for
911 	 * backwards compatibility with clients that use -1 id's.
912 	 * Instead, -1 uid or gid is later mapped to the
913 	 * (export-specific) anonymous id by nfsd_setuser.
914 	 * Supplementary gid's will be left alone.
915 	 */
916 	userns = (rqstp->rq_xprt && rqstp->rq_xprt->xpt_cred) ?
917 		rqstp->rq_xprt->xpt_cred->user_ns : &init_user_ns;
918 	if (xdr_stream_decode_u32(xdr, &i) < 0)
919 		return SVC_GARBAGE;
920 	cred->cr_uid = make_kuid(userns, i);
921 	if (xdr_stream_decode_u32(xdr, &i) < 0)
922 		return SVC_GARBAGE;
923 	cred->cr_gid = make_kgid(userns, i);
924 
925 	if (xdr_stream_decode_u32(xdr, &len) < 0)
926 		return SVC_GARBAGE;
927 	if (len > UNX_NGROUPS)
928 		goto badcred;
929 	p = xdr_inline_decode(xdr, XDR_UNIT * len);
930 	if (!p)
931 		return SVC_GARBAGE;
932 	cred->cr_group_info = groups_alloc(len);
933 	if (cred->cr_group_info == NULL)
934 		return SVC_CLOSE;
935 	for (i = 0; i < len; i++) {
936 		kgid_t kgid = make_kgid(userns, be32_to_cpup(p++));
937 		cred->cr_group_info->gid[i] = kgid;
938 	}
939 	groups_sort(cred->cr_group_info);
940 
941 	/* Call's verf field: */
942 	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
943 		return SVC_GARBAGE;
944 	if (flavor != RPC_AUTH_NULL || len != 0) {
945 		rqstp->rq_auth_stat = rpc_autherr_badverf;
946 		return SVC_DENIED;
947 	}
948 
949 	/* Put NULL verifier */
950 	svc_putnl(resv, RPC_AUTH_NULL);
951 	svc_putnl(resv, 0);
952 
953 	rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
954 	return SVC_OK;
955 
956 badcred:
957 	rqstp->rq_auth_stat = rpc_autherr_badcred;
958 	return SVC_DENIED;
959 }
960 
961 static int
962 svcauth_unix_release(struct svc_rqst *rqstp)
963 {
964 	/* Verifier (such as it is) is already in place.
965 	 */
966 	if (rqstp->rq_client)
967 		auth_domain_put(rqstp->rq_client);
968 	rqstp->rq_client = NULL;
969 	if (rqstp->rq_cred.cr_group_info)
970 		put_group_info(rqstp->rq_cred.cr_group_info);
971 	rqstp->rq_cred.cr_group_info = NULL;
972 
973 	return 0;
974 }
975 
976 
977 struct auth_ops svcauth_unix = {
978 	.name		= "unix",
979 	.owner		= THIS_MODULE,
980 	.flavour	= RPC_AUTH_UNIX,
981 	.accept 	= svcauth_unix_accept,
982 	.release	= svcauth_unix_release,
983 	.domain_release	= svcauth_unix_domain_release,
984 	.set_client	= svcauth_unix_set_client,
985 };
986 
987 static const struct cache_detail ip_map_cache_template = {
988 	.owner		= THIS_MODULE,
989 	.hash_size	= IP_HASHMAX,
990 	.name		= "auth.unix.ip",
991 	.cache_put	= ip_map_put,
992 	.cache_upcall	= ip_map_upcall,
993 	.cache_request	= ip_map_request,
994 	.cache_parse	= ip_map_parse,
995 	.cache_show	= ip_map_show,
996 	.match		= ip_map_match,
997 	.init		= ip_map_init,
998 	.update		= update,
999 	.alloc		= ip_map_alloc,
1000 };
1001 
1002 int ip_map_cache_create(struct net *net)
1003 {
1004 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1005 	struct cache_detail *cd;
1006 	int err;
1007 
1008 	cd = cache_create_net(&ip_map_cache_template, net);
1009 	if (IS_ERR(cd))
1010 		return PTR_ERR(cd);
1011 	err = cache_register_net(cd, net);
1012 	if (err) {
1013 		cache_destroy_net(cd, net);
1014 		return err;
1015 	}
1016 	sn->ip_map_cache = cd;
1017 	return 0;
1018 }
1019 
1020 void ip_map_cache_destroy(struct net *net)
1021 {
1022 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1023 	struct cache_detail *cd = sn->ip_map_cache;
1024 
1025 	sn->ip_map_cache = NULL;
1026 	cache_purge(cd);
1027 	cache_unregister_net(cd, net);
1028 	cache_destroy_net(cd, net);
1029 }
1030