/* net/sunrpc/svcauth_unix.c (openbmc/linux xref, revision a09d2831) */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#define RPCDBG_FACILITY	RPCDBG_AUTH


/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2).  i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */


struct unix_domain {
	struct auth_domain	h;
	int	addr_changes;
	/* other stuff later */
};

extern struct auth_ops svcauth_unix;

struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_lookup(name, NULL);
	while (1) {
		if (rv) {
			if (new && rv != &new->h)
				auth_domain_put(&new->h);

			if (rv->flavour != &svcauth_unix) {
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		new->addr_changes = 0;
		rv = auth_domain_lookup(name, &new->h);
	}
}
EXPORT_SYMBOL_GPL(unix_domain_find);
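
/*
 * Usage sketch (illustrative only; the caller and name below are
 * assumptions, not taken from this file): export-management code that
 * needs the AUTH_UNIX domain for a client name can do roughly
 *
 *	struct auth_domain *dom = unix_domain_find(name);
 *	if (dom) {
 *		... associate dom with an export ...
 *		auth_domain_put(dom);
 *	}
 *
 * unix_domain_find() returns an existing domain of this flavour (with a
 * reference taken), creates one if none exists, and returns NULL on
 * allocation failure or if the name is already bound to another flavour.
 */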

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	struct unix_domain *ud = container_of(dom, struct unix_domain, h);

	kfree(dom->name);
	kfree(ud);
}


/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define	IP_HASHBITS	8
#define	IP_HASHMAX	(1<<IP_HASHBITS)
#define	IP_HASHMASK	(IP_HASHMAX-1)

struct ip_map {
	struct cache_head	h;
	char			m_class[8]; /* e.g. "nfsd" */
	struct in6_addr		m_addr;
	struct unix_domain	*m_client;
	int			m_add_change;
};
static struct cache_head	*ip_table[IP_HASHMAX];

static void ip_map_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct ip_map *im = container_of(item, struct ip_map, h);

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		auth_domain_put(&im->m_client->h);
	kfree(im);
}

#if IP_HASHBITS == 8
/* hash_long on a 64 bit machine is currently REALLY BAD for
 * IP addresses in reverse-endian (i.e. on a little-endian machine).
 * So use a trivial but reliable hash instead
 */
static inline int hash_ip(__be32 ip)
{
	int hash = (__force u32)ip ^ ((__force u32)ip>>16);
	return (hash ^ (hash>>8)) & 0xff;
}
#endif
static inline int hash_ip6(struct in6_addr ip)
{
	return (hash_ip(ip.s6_addr32[0]) ^
		hash_ip(ip.s6_addr32[1]) ^
		hash_ip(ip.s6_addr32[2]) ^
		hash_ip(ip.s6_addr32[3]));
}
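/*
 * Note (informal): for a 32-bit word with bytes b0 b1 b2 b3, hash_ip()
 * reduces to b0 ^ b1 ^ b2 ^ b3, i.e. it XOR-folds the four bytes of the
 * network-order word into one byte, so the result does not depend on
 * host endianness.  For example, a hypothetical client 192.0.2.9 hashes
 * to 192 ^ 0 ^ 2 ^ 9, and hash_ip6() further XORs the per-word results
 * of an IPv6 address.
 */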
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct ip_map *orig = container_of(corig, struct ip_map, h);
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	return strcmp(orig->m_class, new->m_class) == 0 &&
	       ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	strcpy(new->m_class, item->m_class);
	ipv6_addr_copy(&new->m_addr, &item->m_addr);
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	kref_get(&item->m_client->h.ref);
	new->m_client = item->m_client;
	new->m_add_change = item->m_add_change;
}
static struct cache_head *ip_map_alloc(void)
{
	struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i)
		return &i->h;
	else
		return NULL;
}

static void ip_map_request(struct cache_detail *cd,
				  struct cache_head *h,
				  char **bpp, int *blen)
{
	char text_addr[40];
	struct ip_map *im = container_of(h, struct ip_map, h);

	if (ipv6_addr_v4mapped(&(im->m_addr))) {
		snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
	} else {
		snprintf(text_addr, 40, "%pI6", &im->m_addr);
	}
	qword_add(bpp, blen, im->m_class);
	qword_add(bpp, blen, text_addr);
	(*bpp)[-1] = '\n';
}
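/*
 * Illustrative upcall line (example address, not from this file): for an
 * IPv4-mapped client, ip_map_request() emits
 *
 *	nfsd 192.0.2.1
 *
 * terminated by a newline, into the cache channel that a userspace
 * helper (typically rpc.mountd) reads; plain IPv6 clients use the %pI6
 * form instead.
 */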

static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
}

static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);

static int ip_map_parse(struct cache_detail *cd,
			  char *mesg, int mlen)
{
	/* class ipaddress expiry [domainname] */
	/* should be safe just to use the start of the input buffer
	 * for scratch: */
	char *buf = mesg;
	int len;
	int b1, b2, b3, b4, b5, b6, b7, b8;
	char c;
	char class[8];
	struct in6_addr addr;
	int err;

	struct ip_map *ipmp;
	struct auth_domain *dom;
	time_t expiry;

	if (mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	/* class */
	len = qword_get(&mesg, class, sizeof(class));
	if (len <= 0) return -EINVAL;

	/* ip address */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0) return -EINVAL;

	if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) == 4) {
		addr.s6_addr32[0] = 0;
		addr.s6_addr32[1] = 0;
		addr.s6_addr32[2] = htonl(0xffff);
		addr.s6_addr32[3] =
			htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
	} else if (sscanf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x%c",
			&b1, &b2, &b3, &b4, &b5, &b6, &b7, &b8, &c) == 8) {
		addr.s6_addr16[0] = htons(b1);
		addr.s6_addr16[1] = htons(b2);
		addr.s6_addr16[2] = htons(b3);
		addr.s6_addr16[3] = htons(b4);
		addr.s6_addr16[4] = htons(b5);
		addr.s6_addr16[5] = htons(b6);
		addr.s6_addr16[6] = htons(b7);
		addr.s6_addr16[7] = htons(b8);
	} else
		return -EINVAL;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;

	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	ipmp = ip_map_lookup(class, &addr);
	if (ipmp) {
		err = ip_map_update(ipmp,
			     container_of(dom, struct unix_domain, h),
			     expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}
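/*
 * Illustrative downcall line (example values): userspace answers an
 * auth.unix.ip upcall by writing "class address expiry [domain]" back to
 * the channel, e.g.
 *
 *	nfsd 192.0.2.1 1700000000 exampleclient
 *
 * where expiry is seconds since the epoch; omitting the domain records a
 * negative entry, i.e. the address is known but not authorised.
 */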

static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in6_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	ipv6_addr_copy(&addr, &im->m_addr);

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	if (ipv6_addr_v4mapped(&addr)) {
		seq_printf(m, "%s %pI4 %s\n",
			im->m_class, &addr.s6_addr32[3], dom);
	} else {
		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
	}
	return 0;
}


struct cache_detail ip_map_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= IP_HASHMAX,
	.hash_table	= ip_table,
	.name		= "auth.unix.ip",
	.cache_put	= ip_map_put,
	.cache_upcall	= ip_map_upcall,
	.cache_parse	= ip_map_parse,
	.cache_show	= ip_map_show,
	.match		= ip_map_match,
	.init		= ip_map_init,
	.update		= update,
	.alloc		= ip_map_alloc,
};
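/*
 * Note (informal): once this cache_detail is registered, it normally
 * appears as /proc/net/rpc/auth.unix.ip with "channel", "content" and
 * "flush" files: ip_map_parse() consumes writes to "channel" and
 * ip_map_show() generates the "content" listing.
 */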

static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
{
	struct ip_map ip;
	struct cache_head *ch;

	strcpy(ip.m_class, class);
	ipv6_addr_copy(&ip.m_addr, addr);
	ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
				 hash_str(class, IP_HASHBITS) ^
				 hash_ip6(*addr));

	if (ch)
		return container_of(ch, struct ip_map, h);
	else
		return NULL;
}

static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry)
{
	struct ip_map ip;
	struct cache_head *ch;

	ip.m_client = udom;
	ip.h.flags = 0;
	if (!udom)
		set_bit(CACHE_NEGATIVE, &ip.h.flags);
	else {
		ip.m_add_change = udom->addr_changes;
		/* if this is from the legacy set_client system call,
		 * we need m_add_change to be one higher
		 */
		if (expiry == NEVER)
			ip.m_add_change++;
	}
	ip.h.expiry_time = expiry;
	ch = sunrpc_cache_update(&ip_map_cache,
				 &ip.h, &ipm->h,
				 hash_str(ipm->m_class, IP_HASHBITS) ^
				 hash_ip6(ipm->m_addr));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, &ip_map_cache);
	return 0;
}

int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
{
	struct unix_domain *udom;
	struct ip_map *ipmp;

	if (dom->flavour != &svcauth_unix)
		return -EINVAL;
	udom = container_of(dom, struct unix_domain, h);
	ipmp = ip_map_lookup("nfsd", addr);

	if (ipmp)
		return ip_map_update(ipmp, udom, NEVER);
	else
		return -ENOMEM;
}
EXPORT_SYMBOL_GPL(auth_unix_add_addr);

int auth_unix_forget_old(struct auth_domain *dom)
{
	struct unix_domain *udom;

	if (dom->flavour != &svcauth_unix)
		return -EINVAL;
	udom = container_of(dom, struct unix_domain, h);
	udom->addr_changes++;
	return 0;
}
EXPORT_SYMBOL_GPL(auth_unix_forget_old);

struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
{
	struct ip_map *ipm;
	struct auth_domain *rv;

	ipm = ip_map_lookup("nfsd", addr);

	if (!ipm)
		return NULL;
	if (cache_check(&ip_map_cache, &ipm->h, NULL))
		return NULL;

	if ((ipm->m_client->addr_changes - ipm->m_add_change) > 0) {
		if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
			auth_domain_put(&ipm->m_client->h);
		rv = NULL;
	} else {
		rv = &ipm->m_client->h;
		kref_get(&rv->ref);
	}
	cache_put(&ipm->h, &ip_map_cache);
	return rv;
}
EXPORT_SYMBOL_GPL(auth_unix_lookup);

void svcauth_unix_purge(void)
{
	cache_purge(&ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);

static inline struct ip_map *
ip_map_cached_get(struct svc_rqst *rqstp)
{
	struct ip_map *ipm = NULL;
	struct svc_xprt *xprt = rqstp->rq_xprt;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		ipm = xprt->xpt_auth_cache;
		if (ipm != NULL) {
			if (!cache_valid(&ipm->h)) {
				/*
				 * The entry has been invalidated since it was
				 * remembered, e.g. by a second mount from the
				 * same IP address.
				 */
				xprt->xpt_auth_cache = NULL;
				spin_unlock(&xprt->xpt_lock);
				cache_put(&ipm->h, &ip_map_cache);
				return NULL;
			}
			cache_get(&ipm->h);
		}
		spin_unlock(&xprt->xpt_lock);
	}
	return ipm;
}

static inline void
ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		if (xprt->xpt_auth_cache == NULL) {
			/* newly cached, keep the reference */
			xprt->xpt_auth_cache = ipm;
			ipm = NULL;
		}
		spin_unlock(&xprt->xpt_lock);
	}
	if (ipm)
		cache_put(&ipm->h, &ip_map_cache);
}

void
svcauth_unix_info_release(void *info)
{
	struct ip_map *ipm = info;
	cache_put(&ipm->h, &ip_map_cache);
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of 16 groups in its credential
 */
#define	GID_HASHBITS	8
#define	GID_HASHMAX	(1<<GID_HASHBITS)
#define	GID_HASHMASK	(GID_HASHMAX - 1)

struct unix_gid {
	struct cache_head	h;
	uid_t			uid;
	struct group_info	*gi;
};
static struct cache_head	*gid_table[GID_HASHMAX];

static void unix_gid_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct unix_gid *ug = container_of(item, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		put_group_info(ug->gi);
	kfree(ug);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	return orig->uid == new->uid;
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);
	new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);

	get_group_info(item->gi);
	new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
	if (g)
		return &g->h;
	else
		return NULL;
}

static void unix_gid_request(struct cache_detail *cd,
			     struct cache_head *h,
			     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	snprintf(tuid, 20, "%u", ug->uid);
	qword_add(bpp, blen, tuid);
	(*bpp)[-1] = '\n';
}
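/*
 * Illustrative upcall line (example uid): unix_gid_request() writes just
 * the numeric uid, e.g.
 *
 *	1000
 *
 * followed by a newline; a userspace helper is expected to answer with
 * that uid's full group list (see unix_gid_parse() below).
 */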

static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
}

static struct unix_gid *unix_gid_lookup(uid_t uid);
extern struct cache_detail unix_gid_cache;

static int unix_gid_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int uid;
	int gids;
	int rv;
	int i;
	int err;
	time_t expiry;
	struct unix_gid ug, *ugp;

	if (mlen <= 0 || mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &uid);
	if (rv)
		return -EINVAL;
	ug.uid = uid;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0 ; i < gids ; i++) {
		int gid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		GROUP_AT(ug.gi, i) = gid;
	}

	ugp = unix_gid_lookup(uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		ch = sunrpc_cache_update(&unix_gid_cache,
					 &ug.h, &ugp->h,
					 hash_long(uid, GID_HASHBITS));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, &unix_gid_cache);
		}
	} else
		err = -ENOMEM;
 out:
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}
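/*
 * Illustrative downcall line (example values), matching the
 * "uid expiry Ngid gid0 gid1 ... gidN-1" layout parsed above:
 *
 *	1000 1700000000 3 1000 24 27
 *
 * i.e. uid 1000 is valid until the given epoch time and belongs to the
 * three listed groups, which can then replace the (at most 16-entry)
 * list sent in the AUTH_UNIX credential.
 */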

static int unix_gid_show(struct seq_file *m,
			 struct cache_detail *cd,
			 struct cache_head *h)
{
	struct unix_gid *ug;
	int i;
	int glen;

	if (h == NULL) {
		seq_puts(m, "#uid cnt: gids...\n");
		return 0;
	}
	ug = container_of(h, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		glen = ug->gi->ngroups;
	else
		glen = 0;

	seq_printf(m, "%d %d:", ug->uid, glen);
	for (i = 0; i < glen; i++)
		seq_printf(m, " %d", GROUP_AT(ug->gi, i));
	seq_printf(m, "\n");
	return 0;
}

struct cache_detail unix_gid_cache = {
	.owner		= THIS_MODULE,
	.hash_size	= GID_HASHMAX,
	.hash_table	= gid_table,
	.name		= "auth.unix.gid",
	.cache_put	= unix_gid_put,
	.cache_upcall	= unix_gid_upcall,
	.cache_parse	= unix_gid_parse,
	.cache_show	= unix_gid_show,
	.match		= unix_gid_match,
	.init		= unix_gid_init,
	.update		= unix_gid_update,
	.alloc		= unix_gid_alloc,
};

static struct unix_gid *unix_gid_lookup(uid_t uid)
{
	struct unix_gid ug;
	struct cache_head *ch;

	ug.uid = uid;
	ch = sunrpc_cache_lookup(&unix_gid_cache, &ug.h,
				 hash_long(uid, GID_HASHBITS));
	if (ch)
		return container_of(ch, struct unix_gid, h);
	else
		return NULL;
}

static struct group_info *unix_gid_find(uid_t uid, struct svc_rqst *rqstp)
{
	struct unix_gid *ug;
	struct group_info *gi;
	int ret;

	ug = unix_gid_lookup(uid);
	if (!ug)
		return ERR_PTR(-EAGAIN);
	ret = cache_check(&unix_gid_cache, &ug->h, &rqstp->rq_chandle);
	switch (ret) {
	case -ENOENT:
		return ERR_PTR(-ENOENT);
	case 0:
		gi = get_group_info(ug->gi);
		cache_put(&ug->h, &unix_gid_cache);
		return gi;
	default:
		return ERR_PTR(-EAGAIN);
	}
}

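/*
 * svcauth_unix_set_client() maps the client address to an auth_domain
 * via the auth.unix.ip cache and, when an auth.unix.gid entry exists,
 * substitutes the server-side group list for the one sent in the
 * AUTH_UNIX credential.  It returns SVC_OK on success, SVC_DENIED when
 * the address maps to a negative entry (or no entry can be created),
 * and SVC_DROP while a cache upcall is still outstanding or has timed
 * out.
 */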
int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sin6_storage;
	struct ip_map *ipm;
	struct group_info *gi;
	struct svc_cred *cred = &rqstp->rq_cred;

	switch (rqstp->rq_addr.ss_family) {
	case AF_INET:
		sin = svc_addr_in(rqstp);
		sin6 = &sin6_storage;
		ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
		break;
	case AF_INET6:
		sin6 = svc_addr_in6(rqstp);
		break;
	default:
		BUG();
	}

	rqstp->rq_client = NULL;
	if (rqstp->rq_proc == 0)
		return SVC_OK;

	ipm = ip_map_cached_get(rqstp);
	if (ipm == NULL)
		ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
				    &sin6->sin6_addr);

	if (ipm == NULL)
		return SVC_DENIED;

	switch (cache_check(&ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
		default:
			BUG();
		case -EAGAIN:
		case -ETIMEDOUT:
			return SVC_DROP;
		case -ENOENT:
			return SVC_DENIED;
		case 0:
			rqstp->rq_client = &ipm->m_client->h;
			kref_get(&rqstp->rq_client->ref);
			ip_map_cached_put(rqstp, ipm);
			break;
	}

	gi = unix_gid_find(cred->cr_uid, rqstp);
	switch (PTR_ERR(gi)) {
	case -EAGAIN:
		return SVC_DROP;
	case -ENOENT:
		break;
	default:
		put_group_info(cred->cr_group_info);
		cred->cr_group_info = gi;
	}
	return SVC_OK;
}

EXPORT_SYMBOL_GPL(svcauth_unix_set_client);

static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;

	cred->cr_group_info = NULL;
	rqstp->rq_client = NULL;

	if (argv->iov_len < 3*4)
		return SVC_GARBAGE;

	if (svc_getu32(argv) != 0) {
		dprintk("svc: bad null cred\n");
		*authp = rpc_autherr_badcred;
		return SVC_DENIED;
	}
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		dprintk("svc: bad null verf\n");
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = (uid_t) -1;
	cred->cr_gid = (gid_t) -1;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_DROP; /* kmalloc failure - client must retry */

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_flavor = RPC_AUTH_NULL;
	return SVC_OK;
}
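/*
 * Wire view (informal): by the time svcauth_null_accept() runs, the
 * credential flavour word has already been consumed by the caller, so
 * the three words checked above are the credential body length (must be
 * 0), the verifier flavour (must be AUTH_NULL) and the verifier length
 * (must be 0).
 */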

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0; /* don't drop */
}


struct auth_ops svcauth_null = {
	.name		= "null",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_NULL,
	.accept		= svcauth_null_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};


static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
	struct kvec	*argv = &rqstp->rq_arg.head[0];
	struct kvec	*resv = &rqstp->rq_res.head[0];
	struct svc_cred	*cred = &rqstp->rq_cred;
	u32		slen, i;
	int		len   = argv->iov_len;

	cred->cr_group_info = NULL;
	rqstp->rq_client = NULL;

	if ((len -= 3*4) < 0)
		return SVC_GARBAGE;

	svc_getu32(argv);			/* length */
	svc_getu32(argv);			/* time stamp */
	slen = XDR_QUADLEN(svc_getnl(argv));	/* machname length */
	if (slen > 64 || (len -= (slen + 3)*4) < 0)
		goto badcred;
	argv->iov_base = (void*)((__be32*)argv->iov_base + slen);	/* skip machname */
	argv->iov_len -= slen*4;

	cred->cr_uid = svc_getnl(argv);		/* uid */
	cred->cr_gid = svc_getnl(argv);		/* gid */
	slen = svc_getnl(argv);			/* gids length */
	if (slen > 16 || (len -= (slen + 2)*4) < 0)
		goto badcred;
	cred->cr_group_info = groups_alloc(slen);
	if (cred->cr_group_info == NULL)
		return SVC_DROP;
	for (i = 0; i < slen; i++)
		GROUP_AT(cred->cr_group_info, i) = svc_getnl(argv);
	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
		*authp = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Put NULL verifier */
	svc_putnl(resv, RPC_AUTH_NULL);
	svc_putnl(resv, 0);

	rqstp->rq_flavor = RPC_AUTH_UNIX;
	return SVC_OK;

badcred:
	*authp = rpc_autherr_badcred;
	return SVC_DENIED;
}
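/*
 * Informal sketch of the AUTH_UNIX (AUTH_SYS) credential as consumed
 * above, with the flavour word already stripped by the caller:
 *
 *	u32    body length
 *	u32    stamp
 *	opaque machinename   (at most 64 XDR words here)
 *	u32    uid
 *	u32    gid
 *	u32    gids<16>      (count, then up to 16 gids)
 *
 * followed by a verifier that must be AUTH_NULL with a zero-length body.
 */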

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
	/* Verifier (such as it is) is already in place.
	 */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0;
}


struct auth_ops svcauth_unix = {
	.name		= "unix",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_UNIX,
	.accept		= svcauth_unix_accept,
	.release	= svcauth_unix_release,
	.domain_release	= svcauth_unix_domain_release,
	.set_client	= svcauth_unix_set_client,
};