xref: /openbmc/linux/net/sunrpc/svcauth_unix.c (revision e368cd72)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#include <linux/user_namespace.h>
#define RPCDBG_FACILITY	RPCDBG_AUTH

#include "netns.h"

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2).  i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */

struct unix_domain {
	struct auth_domain	h;
	/* other stuff later */
};

extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;

static void svcauth_unix_domain_release_rcu(struct rcu_head *head)
{
	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
	struct unix_domain *ud = container_of(dom, struct unix_domain, h);

	kfree(dom->name);
	kfree(ud);
}

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu);
}

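/*
 * unix_domain_find - find or create an AUTH_UNIX auth_domain with the
 * given name.  Returns a referenced domain, or NULL if allocation fails
 * or a domain of the same name already exists with another flavour.
 */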
struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_find(name);
	while (1) {
		if (rv) {
			if (new && rv != &new->h)
				svcauth_unix_domain_release(&new->h);

			if (rv->flavour != &svcauth_unix) {
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		rv = auth_domain_lookup(name, &new->h);
	}
}
EXPORT_SYMBOL_GPL(unix_domain_find);


/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define	IP_HASHBITS	8
#define	IP_HASHMAX	(1<<IP_HASHBITS)

struct ip_map {
	struct cache_head	h;
	char			m_class[8]; /* e.g. "nfsd" */
	struct in6_addr		m_addr;
	struct unix_domain	*m_client;
	struct rcu_head		m_rcu;
};

static void ip_map_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct ip_map *im = container_of(item, struct ip_map, h);

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		auth_domain_put(&im->m_client->h);
	kfree_rcu(im, m_rcu);
}

static inline int hash_ip6(const struct in6_addr *ip)
{
	return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
}
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct ip_map *orig = container_of(corig, struct ip_map, h);
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	return strcmp(orig->m_class, new->m_class) == 0 &&
	       ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	strcpy(new->m_class, item->m_class);
	new->m_addr = item->m_addr;
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	kref_get(&item->m_client->h.ref);
	new->m_client = item->m_client;
}
static struct cache_head *ip_map_alloc(void)
{
	struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i)
		return &i->h;
	else
		return NULL;
}

static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h);
}

static void ip_map_request(struct cache_detail *cd,
				  struct cache_head *h,
				  char **bpp, int *blen)
{
	char text_addr[40];
	struct ip_map *im = container_of(h, struct ip_map, h);

	if (ipv6_addr_v4mapped(&(im->m_addr))) {
		snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
	} else {
		snprintf(text_addr, 40, "%pI6", &im->m_addr);
	}
	qword_add(bpp, blen, im->m_class);
	qword_add(bpp, blen, text_addr);
	(*bpp)[-1] = '\n';
}

static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time64_t expiry);

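/*
 * ip_map_parse - handle a cache update written to the auth.unix.ip
 * channel by user space (typically mountd), in the form
 * "class IP-address expiry [domainname]".  An empty domainname makes
 * the entry negative.
 */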
static int ip_map_parse(struct cache_detail *cd,
			  char *mesg, int mlen)
{
	/* class ipaddress [domainname] */
	/* should be safe just to use the start of the input buffer
	 * for scratch: */
	char *buf = mesg;
	int len;
	char class[8];
	union {
		struct sockaddr		sa;
		struct sockaddr_in	s4;
		struct sockaddr_in6	s6;
	} address;
	struct sockaddr_in6 sin6;
	int err;

	struct ip_map *ipmp;
	struct auth_domain *dom;
	time64_t expiry;

	if (mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	/* class */
	len = qword_get(&mesg, class, sizeof(class));
	if (len <= 0) return -EINVAL;

	/* ip address */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0) return -EINVAL;

	if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0)
		return -EINVAL;
	switch (address.sa.sa_family) {
	case AF_INET:
		/* Form a mapped IPv4 address in sin6 */
		sin6.sin6_family = AF_INET6;
		ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
				&sin6.sin6_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		memcpy(&sin6, &address.s6, sizeof(sin6));
		break;
#endif
	default:
		return -EINVAL;
	}

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;

	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	/* IPv6 scope IDs are ignored for now */
	ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
	if (ipmp) {
		err = __ip_map_update(cd, ipmp,
			     container_of(dom, struct unix_domain, h),
			     expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}

static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in6_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	addr = im->m_addr;

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	if (ipv6_addr_v4mapped(&addr)) {
		seq_printf(m, "%s %pI4 %s\n",
			im->m_class, &addr.s6_addr32[3], dom);
	} else {
		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
	}
	return 0;
}

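/*
 * __ip_map_lookup - find the ip_map entry for the given class and
 * (v4-mapped) IPv6 address, returning a referenced cache entry or NULL.
 */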
static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
		struct in6_addr *addr)
{
	struct ip_map ip;
	struct cache_head *ch;

	strcpy(ip.m_class, class);
	ip.m_addr = *addr;
	ch = sunrpc_cache_lookup_rcu(cd, &ip.h,
				     hash_str(class, IP_HASHBITS) ^
				     hash_ip6(addr));

	if (ch)
		return container_of(ch, struct ip_map, h);
	else
		return NULL;
}

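/*
 * __ip_map_update - replace the client domain of an existing ip_map
 * entry (or mark it negative when udom is NULL) and set its expiry.
 */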
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
		struct unix_domain *udom, time64_t expiry)
{
	struct ip_map ip;
	struct cache_head *ch;

	ip.m_client = udom;
	ip.h.flags = 0;
	if (!udom)
		set_bit(CACHE_NEGATIVE, &ip.h.flags);
	ip.h.expiry_time = expiry;
	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
				 hash_str(ipm->m_class, IP_HASHBITS) ^
				 hash_ip6(&ipm->m_addr));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, cd);
	return 0;
}

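/* Flush every entry from this namespace's auth.unix.ip cache. */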
void svcauth_unix_purge(struct net *net)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cache_purge(sn->ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);

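/*
 * Each transport may remember the ip_map entry it resolved last time;
 * return that cached entry (with a reference) if it is still valid,
 * otherwise drop it and return NULL so the caller does a fresh lookup.
 */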
static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
	struct ip_map *ipm = NULL;
	struct sunrpc_net *sn;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		ipm = xprt->xpt_auth_cache;
		if (ipm != NULL) {
			sn = net_generic(xprt->xpt_net, sunrpc_net_id);
			if (cache_is_expired(sn->ip_map_cache, &ipm->h)) {
				/*
				 * The entry has been invalidated since it was
				 * remembered, e.g. by a second mount from the
				 * same IP address.
				 */
				xprt->xpt_auth_cache = NULL;
				spin_unlock(&xprt->xpt_lock);
				cache_put(&ipm->h, sn->ip_map_cache);
				return NULL;
			}
			cache_get(&ipm->h);
		}
		spin_unlock(&xprt->xpt_lock);
	}
	return ipm;
}

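/*
 * Stash the ip_map entry on the transport if nothing is cached there
 * yet (transferring the caller's reference), otherwise just drop the
 * reference.
 */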
static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		if (xprt->xpt_auth_cache == NULL) {
			/* newly cached, keep the reference */
			xprt->xpt_auth_cache = ipm;
			ipm = NULL;
		}
		spin_unlock(&xprt->xpt_lock);
	}
	if (ipm) {
		struct sunrpc_net *sn;

		sn = net_generic(xprt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
	struct ip_map *ipm;

	ipm = xpt->xpt_auth_cache;
	if (ipm != NULL) {
		struct sunrpc_net *sn;

		sn = net_generic(xpt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of UNX_NGROUPS
 */
#define	GID_HASHBITS	8
#define	GID_HASHMAX	(1<<GID_HASHBITS)

struct unix_gid {
	struct cache_head	h;
	kuid_t			uid;
	struct group_info	*gi;
	struct rcu_head		rcu;
};

static int unix_gid_hash(kuid_t uid)
{
	return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
}

static void unix_gid_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct unix_gid *ug = container_of(item, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		put_group_info(ug->gi);
	kfree_rcu(ug, rcu);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	return uid_eq(orig->uid, new->uid);
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);
	new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);

	get_group_info(item->gi);
	new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
	if (g)
		return &g->h;
	else
		return NULL;
}

static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall_timeout(cd, h);
}

static void unix_gid_request(struct cache_detail *cd,
			     struct cache_head *h,
			     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid));
	qword_add(bpp, blen, tuid);
	(*bpp)[-1] = '\n';
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);

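/*
 * unix_gid_parse - handle a cache update written to the auth.unix.gid
 * channel, in the form "uid expiry Ngids gid0 gid1 ... gidN-1".
 */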
static int unix_gid_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int id;
	kuid_t uid;
	int gids;
	int rv;
	int i;
	int err;
	time64_t expiry;
	struct unix_gid ug, *ugp;

	if (mesg[mlen - 1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &id);
	if (rv)
		return -EINVAL;
	uid = make_kuid(current_user_ns(), id);
	ug.uid = uid;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0 ; i < gids ; i++) {
		int gid;
		kgid_t kgid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		kgid = make_kgid(current_user_ns(), gid);
		if (!gid_valid(kgid))
			goto out;
		ug.gi->gid[i] = kgid;
	}

	groups_sort(ug.gi);
	ugp = unix_gid_lookup(cd, uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		ch = sunrpc_cache_update(cd,
					 &ug.h, &ugp->h,
					 unix_gid_hash(uid));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, cd);
		}
	} else
		err = -ENOMEM;
 out:
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}

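/* Display one auth.unix.gid entry as "uid cnt: gid0 gid1 ...". */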
static int unix_gid_show(struct seq_file *m,
			 struct cache_detail *cd,
			 struct cache_head *h)
{
	struct user_namespace *user_ns = m->file->f_cred->user_ns;
	struct unix_gid *ug;
	int i;
	int glen;

	if (h == NULL) {
		seq_puts(m, "#uid cnt: gids...\n");
		return 0;
	}
	ug = container_of(h, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		glen = ug->gi->ngroups;
	else
		glen = 0;

	seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
	for (i = 0; i < glen; i++)
		seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i]));
	seq_printf(m, "\n");
	return 0;
}

static const struct cache_detail unix_gid_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= GID_HASHMAX,
	.name		= "auth.unix.gid",
	.cache_put	= unix_gid_put,
	.cache_upcall	= unix_gid_upcall,
	.cache_request	= unix_gid_request,
	.cache_parse	= unix_gid_parse,
	.cache_show	= unix_gid_show,
	.match		= unix_gid_match,
	.init		= unix_gid_init,
	.update		= unix_gid_update,
	.alloc		= unix_gid_alloc,
};

int unix_gid_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&unix_gid_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->unix_gid_cache = cd;
	return 0;
}

void unix_gid_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->unix_gid_cache;

	sn->unix_gid_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
{
	struct unix_gid ug;
	struct cache_head *ch;

	ug.uid = uid;
	ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid));
	if (ch)
		return container_of(ch, struct unix_gid, h);
	else
		return NULL;
}

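/*
 * unix_gid_find - look up the supplementary group list for @uid in the
 * auth.unix.gid cache.  Returns a referenced group_info, or an ERR_PTR
 * when the entry is negative, still being resolved, or timed out.
 */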
static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
{
	struct unix_gid *ug;
	struct group_info *gi;
	int ret;
	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net,
					    sunrpc_net_id);

	ug = unix_gid_lookup(sn->unix_gid_cache, uid);
	if (!ug)
		return ERR_PTR(-EAGAIN);
	ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle);
	switch (ret) {
	case -ENOENT:
		return ERR_PTR(-ENOENT);
	case -ETIMEDOUT:
		return ERR_PTR(-ESHUTDOWN);
	case 0:
		gi = get_group_info(ug->gi);
		cache_put(&ug->h, sn->unix_gid_cache);
		return gi;
	default:
		return ERR_PTR(-EAGAIN);
	}
}

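/*
 * svcauth_unix_set_client - map the client's IP address to an
 * auth_domain via the auth.unix.ip cache, and replace the AUTH_UNIX
 * supplementary groups with the auth.unix.gid list for the request's
 * uid when one is available.
 */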
int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sin6_storage;
	struct ip_map *ipm;
	struct group_info *gi;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct net *net = xprt->xpt_net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	switch (rqstp->rq_addr.ss_family) {
	case AF_INET:
		sin = svc_addr_in(rqstp);
		sin6 = &sin6_storage;
		ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
		break;
	case AF_INET6:
		sin6 = svc_addr_in6(rqstp);
		break;
	default:
		BUG();
	}

	rqstp->rq_client = NULL;
	if (rqstp->rq_proc == 0)
		return SVC_OK;

	ipm = ip_map_cached_get(xprt);
	if (ipm == NULL)
		ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
				    &sin6->sin6_addr);

	if (ipm == NULL)
		return SVC_DENIED;

	switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
	default:
		BUG();
	case -ETIMEDOUT:
		return SVC_CLOSE;
	case -EAGAIN:
		return SVC_DROP;
	case -ENOENT:
		return SVC_DENIED;
	case 0:
		rqstp->rq_client = &ipm->m_client->h;
		kref_get(&rqstp->rq_client->ref);
		ip_map_cached_put(xprt, ipm);
		break;
	}

	gi = unix_gid_find(cred->cr_uid, rqstp);
	switch (PTR_ERR(gi)) {
	case -EAGAIN:
		return SVC_DROP;
	case -ESHUTDOWN:
		return SVC_CLOSE;
	case -ENOENT:
		break;
	default:
		put_group_info(cred->cr_group_info);
		cred->cr_group_info = gi;
	}
	return SVC_OK;
}
EXPORT_SYMBOL_GPL(svcauth_unix_set_client);

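/*
 * Accept an AUTH_NULL credential: no identity is carried, so the uid
 * and gid are marked invalid to request mapping to the anonymous
 * (nobody) ids.
 */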
static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;

	if (argv->iov_len < 3*4)
		return SVC_GARBAGE;

	if (svc_getu32(argv) != 0) {
		dprintk("svc: bad null cred\n");
		*authp = rpc_autherr_badcred;
		return SVC_DENIED;
	}
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		dprintk("svc: bad null verf\n");
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = INVALID_UID;
	cred->cr_gid = INVALID_GID;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE; /* kmalloc failure - client must retry */

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
	return SVC_OK;
}

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0; /* don't drop */
}

struct auth_ops svcauth_null = {
	.name		= "null",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_NULL,
	.accept		= svcauth_null_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};

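/*
 * Accept an AUTH_UNIX (AUTH_SYS) credential: decode the machine name,
 * uid, gid and up to UNX_NGROUPS supplementary gids from the RPC
 * credential, and require a NULL verifier.
 */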
static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;
	struct user_namespace *userns;
	u32		slen, i;
	int		len   = argv->iov_len;

	if ((len -= 3*4) < 0)
		return SVC_GARBAGE;

	svc_getu32(argv);			/* length */
	svc_getu32(argv);			/* time stamp */
	slen = XDR_QUADLEN(svc_getnl(argv));	/* machname length */
	if (slen > 64 || (len -= (slen + 3)*4) < 0)
		goto badcred;
	argv->iov_base = (void*)((__be32*)argv->iov_base + slen);	/* skip machname */
	argv->iov_len -= slen*4;
	/*
	 * Note: we skip uid_valid()/gid_valid() checks here for
	 * backwards compatibility with clients that use -1 id's.
	 * Instead, -1 uid or gid is later mapped to the
	 * (export-specific) anonymous id by nfsd_setuser.
	 * Supplementary gid's will be left alone.
	 */
	userns = (rqstp->rq_xprt && rqstp->rq_xprt->xpt_cred) ?
		rqstp->rq_xprt->xpt_cred->user_ns : &init_user_ns;
	cred->cr_uid = make_kuid(userns, svc_getnl(argv)); /* uid */
	cred->cr_gid = make_kgid(userns, svc_getnl(argv)); /* gid */
	slen = svc_getnl(argv);			/* gids length */
	if (slen > UNX_NGROUPS || (len -= (slen + 2)*4) < 0)
		goto badcred;
	cred->cr_group_info = groups_alloc(slen);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;
	for (i = 0; i < slen; i++) {
		kgid_t kgid = make_kgid(userns, svc_getnl(argv));
		cred->cr_group_info->gid[i] = kgid;
	}
	groups_sort(cred->cr_group_info);
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
	return SVC_OK;

badcred:
	*authp = rpc_autherr_badcred;
	return SVC_DENIED;
}

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
	/* Verifier (such as it is) is already in place.
	 */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0;
}

struct auth_ops svcauth_unix = {
	.name		= "unix",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_UNIX,
	.accept		= svcauth_unix_accept,
	.release	= svcauth_unix_release,
	.domain_release	= svcauth_unix_domain_release,
	.set_client	= svcauth_unix_set_client,
};

static const struct cache_detail ip_map_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= IP_HASHMAX,
	.name		= "auth.unix.ip",
	.cache_put	= ip_map_put,
	.cache_upcall	= ip_map_upcall,
	.cache_request	= ip_map_request,
	.cache_parse	= ip_map_parse,
	.cache_show	= ip_map_show,
	.match		= ip_map_match,
	.init		= ip_map_init,
	.update		= update,
	.alloc		= ip_map_alloc,
};

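/* Create and register this namespace's auth.unix.ip cache. */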
int ip_map_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&ip_map_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->ip_map_cache = cd;
	return 0;
}

void ip_map_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->ip_map_cache;

	sn->ip_map_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}
915