1 // SPDX-License-Identifier: GPL-2.0-only 2 #include <linux/types.h> 3 #include <linux/sched.h> 4 #include <linux/module.h> 5 #include <linux/sunrpc/types.h> 6 #include <linux/sunrpc/xdr.h> 7 #include <linux/sunrpc/svcsock.h> 8 #include <linux/sunrpc/svcauth.h> 9 #include <linux/sunrpc/gss_api.h> 10 #include <linux/sunrpc/addr.h> 11 #include <linux/err.h> 12 #include <linux/seq_file.h> 13 #include <linux/hash.h> 14 #include <linux/string.h> 15 #include <linux/slab.h> 16 #include <net/sock.h> 17 #include <net/ipv6.h> 18 #include <linux/kernel.h> 19 #include <linux/user_namespace.h> 20 #define RPCDBG_FACILITY RPCDBG_AUTH 21 22 23 #include "netns.h" 24 25 /* 26 * AUTHUNIX and AUTHNULL credentials are both handled here. 27 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid 28 * are always nobody (-2). i.e. we do the same IP address checks for 29 * AUTHNULL as for AUTHUNIX, and that is done here. 30 */ 31 32 33 struct unix_domain { 34 struct auth_domain h; 35 /* other stuff later */ 36 }; 37 38 extern struct auth_ops svcauth_null; 39 extern struct auth_ops svcauth_unix; 40 41 static void svcauth_unix_domain_release_rcu(struct rcu_head *head) 42 { 43 struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head); 44 struct unix_domain *ud = container_of(dom, struct unix_domain, h); 45 46 kfree(dom->name); 47 kfree(ud); 48 } 49 50 static void svcauth_unix_domain_release(struct auth_domain *dom) 51 { 52 call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu); 53 } 54 55 struct auth_domain *unix_domain_find(char *name) 56 { 57 struct auth_domain *rv; 58 struct unix_domain *new = NULL; 59 60 rv = auth_domain_find(name); 61 while(1) { 62 if (rv) { 63 if (new && rv != &new->h) 64 svcauth_unix_domain_release(&new->h); 65 66 if (rv->flavour != &svcauth_unix) { 67 auth_domain_put(rv); 68 return NULL; 69 } 70 return rv; 71 } 72 73 new = kmalloc(sizeof(*new), GFP_KERNEL); 74 if (new == NULL) 75 return NULL; 76 kref_init(&new->h.ref); 77 new->h.name = kstrdup(name, GFP_KERNEL); 78 if (new->h.name == NULL) { 79 kfree(new); 80 return NULL; 81 } 82 new->h.flavour = &svcauth_unix; 83 rv = auth_domain_lookup(name, &new->h); 84 } 85 } 86 EXPORT_SYMBOL_GPL(unix_domain_find); 87 88 89 /************************************************** 90 * cache for IP address to unix_domain 91 * as needed by AUTH_UNIX 92 */ 93 #define IP_HASHBITS 8 94 #define IP_HASHMAX (1<<IP_HASHBITS) 95 96 struct ip_map { 97 struct cache_head h; 98 char m_class[8]; /* e.g. "nfsd" */ 99 struct in6_addr m_addr; 100 struct unix_domain *m_client; 101 struct rcu_head m_rcu; 102 }; 103 104 static void ip_map_put(struct kref *kref) 105 { 106 struct cache_head *item = container_of(kref, struct cache_head, ref); 107 struct ip_map *im = container_of(item, struct ip_map,h); 108 109 if (test_bit(CACHE_VALID, &item->flags) && 110 !test_bit(CACHE_NEGATIVE, &item->flags)) 111 auth_domain_put(&im->m_client->h); 112 kfree_rcu(im, m_rcu); 113 } 114 115 static inline int hash_ip6(const struct in6_addr *ip) 116 { 117 return hash_32(ipv6_addr_hash(ip), IP_HASHBITS); 118 } 119 static int ip_map_match(struct cache_head *corig, struct cache_head *cnew) 120 { 121 struct ip_map *orig = container_of(corig, struct ip_map, h); 122 struct ip_map *new = container_of(cnew, struct ip_map, h); 123 return strcmp(orig->m_class, new->m_class) == 0 && 124 ipv6_addr_equal(&orig->m_addr, &new->m_addr); 125 } 126 static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) 127 { 128 struct ip_map *new = container_of(cnew, struct ip_map, h); 129 struct ip_map *item = container_of(citem, struct ip_map, h); 130 131 strcpy(new->m_class, item->m_class); 132 new->m_addr = item->m_addr; 133 } 134 static void update(struct cache_head *cnew, struct cache_head *citem) 135 { 136 struct ip_map *new = container_of(cnew, struct ip_map, h); 137 struct ip_map *item = container_of(citem, struct ip_map, h); 138 139 kref_get(&item->m_client->h.ref); 140 new->m_client = item->m_client; 141 } 142 static struct cache_head *ip_map_alloc(void) 143 { 144 struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL); 145 if (i) 146 return &i->h; 147 else 148 return NULL; 149 } 150 151 static void ip_map_request(struct cache_detail *cd, 152 struct cache_head *h, 153 char **bpp, int *blen) 154 { 155 char text_addr[40]; 156 struct ip_map *im = container_of(h, struct ip_map, h); 157 158 if (ipv6_addr_v4mapped(&(im->m_addr))) { 159 snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]); 160 } else { 161 snprintf(text_addr, 40, "%pI6", &im->m_addr); 162 } 163 qword_add(bpp, blen, im->m_class); 164 qword_add(bpp, blen, text_addr); 165 (*bpp)[-1] = '\n'; 166 } 167 168 static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr); 169 static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time64_t expiry); 170 171 static int ip_map_parse(struct cache_detail *cd, 172 char *mesg, int mlen) 173 { 174 /* class ipaddress [domainname] */ 175 /* should be safe just to use the start of the input buffer 176 * for scratch: */ 177 char *buf = mesg; 178 int len; 179 char class[8]; 180 union { 181 struct sockaddr sa; 182 struct sockaddr_in s4; 183 struct sockaddr_in6 s6; 184 } address; 185 struct sockaddr_in6 sin6; 186 int err; 187 188 struct ip_map *ipmp; 189 struct auth_domain *dom; 190 time64_t expiry; 191 192 if (mesg[mlen-1] != '\n') 193 return -EINVAL; 194 mesg[mlen-1] = 0; 195 196 /* class */ 197 len = qword_get(&mesg, class, sizeof(class)); 198 if (len <= 0) return -EINVAL; 199 200 /* ip address */ 201 len = qword_get(&mesg, buf, mlen); 202 if (len <= 0) return -EINVAL; 203 204 if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0) 205 return -EINVAL; 206 switch (address.sa.sa_family) { 207 case AF_INET: 208 /* Form a mapped IPv4 address in sin6 */ 209 sin6.sin6_family = AF_INET6; 210 ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr, 211 &sin6.sin6_addr); 212 break; 213 #if IS_ENABLED(CONFIG_IPV6) 214 case AF_INET6: 215 memcpy(&sin6, &address.s6, sizeof(sin6)); 216 break; 217 #endif 218 default: 219 return -EINVAL; 220 } 221 222 expiry = get_expiry(&mesg); 223 if (expiry ==0) 224 return -EINVAL; 225 226 /* domainname, or empty for NEGATIVE */ 227 len = qword_get(&mesg, buf, mlen); 228 if (len < 0) return -EINVAL; 229 230 if (len) { 231 dom = unix_domain_find(buf); 232 if (dom == NULL) 233 return -ENOENT; 234 } else 235 dom = NULL; 236 237 /* IPv6 scope IDs are ignored for now */ 238 ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr); 239 if (ipmp) { 240 err = __ip_map_update(cd, ipmp, 241 container_of(dom, struct unix_domain, h), 242 expiry); 243 } else 244 err = -ENOMEM; 245 246 if (dom) 247 auth_domain_put(dom); 248 249 cache_flush(); 250 return err; 251 } 252 253 static int ip_map_show(struct seq_file *m, 254 struct cache_detail *cd, 255 struct cache_head *h) 256 { 257 struct ip_map *im; 258 struct in6_addr addr; 259 char *dom = "-no-domain-"; 260 261 if (h == NULL) { 262 seq_puts(m, "#class IP domain\n"); 263 return 0; 264 } 265 im = container_of(h, struct ip_map, h); 266 /* class addr domain */ 267 addr = im->m_addr; 268 269 if (test_bit(CACHE_VALID, &h->flags) && 270 !test_bit(CACHE_NEGATIVE, &h->flags)) 271 dom = im->m_client->h.name; 272 273 if (ipv6_addr_v4mapped(&addr)) { 274 seq_printf(m, "%s %pI4 %s\n", 275 im->m_class, &addr.s6_addr32[3], dom); 276 } else { 277 seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom); 278 } 279 return 0; 280 } 281 282 283 static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, 284 struct in6_addr *addr) 285 { 286 struct ip_map ip; 287 struct cache_head *ch; 288 289 strcpy(ip.m_class, class); 290 ip.m_addr = *addr; 291 ch = sunrpc_cache_lookup_rcu(cd, &ip.h, 292 hash_str(class, IP_HASHBITS) ^ 293 hash_ip6(addr)); 294 295 if (ch) 296 return container_of(ch, struct ip_map, h); 297 else 298 return NULL; 299 } 300 301 static inline struct ip_map *ip_map_lookup(struct net *net, char *class, 302 struct in6_addr *addr) 303 { 304 struct sunrpc_net *sn; 305 306 sn = net_generic(net, sunrpc_net_id); 307 return __ip_map_lookup(sn->ip_map_cache, class, addr); 308 } 309 310 static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, 311 struct unix_domain *udom, time64_t expiry) 312 { 313 struct ip_map ip; 314 struct cache_head *ch; 315 316 ip.m_client = udom; 317 ip.h.flags = 0; 318 if (!udom) 319 set_bit(CACHE_NEGATIVE, &ip.h.flags); 320 ip.h.expiry_time = expiry; 321 ch = sunrpc_cache_update(cd, &ip.h, &ipm->h, 322 hash_str(ipm->m_class, IP_HASHBITS) ^ 323 hash_ip6(&ipm->m_addr)); 324 if (!ch) 325 return -ENOMEM; 326 cache_put(ch, cd); 327 return 0; 328 } 329 330 static inline int ip_map_update(struct net *net, struct ip_map *ipm, 331 struct unix_domain *udom, time64_t expiry) 332 { 333 struct sunrpc_net *sn; 334 335 sn = net_generic(net, sunrpc_net_id); 336 return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); 337 } 338 339 void svcauth_unix_purge(struct net *net) 340 { 341 struct sunrpc_net *sn; 342 343 sn = net_generic(net, sunrpc_net_id); 344 cache_purge(sn->ip_map_cache); 345 } 346 EXPORT_SYMBOL_GPL(svcauth_unix_purge); 347 348 static inline struct ip_map * 349 ip_map_cached_get(struct svc_xprt *xprt) 350 { 351 struct ip_map *ipm = NULL; 352 struct sunrpc_net *sn; 353 354 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) { 355 spin_lock(&xprt->xpt_lock); 356 ipm = xprt->xpt_auth_cache; 357 if (ipm != NULL) { 358 sn = net_generic(xprt->xpt_net, sunrpc_net_id); 359 if (cache_is_expired(sn->ip_map_cache, &ipm->h)) { 360 /* 361 * The entry has been invalidated since it was 362 * remembered, e.g. by a second mount from the 363 * same IP address. 364 */ 365 xprt->xpt_auth_cache = NULL; 366 spin_unlock(&xprt->xpt_lock); 367 cache_put(&ipm->h, sn->ip_map_cache); 368 return NULL; 369 } 370 cache_get(&ipm->h); 371 } 372 spin_unlock(&xprt->xpt_lock); 373 } 374 return ipm; 375 } 376 377 static inline void 378 ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm) 379 { 380 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) { 381 spin_lock(&xprt->xpt_lock); 382 if (xprt->xpt_auth_cache == NULL) { 383 /* newly cached, keep the reference */ 384 xprt->xpt_auth_cache = ipm; 385 ipm = NULL; 386 } 387 spin_unlock(&xprt->xpt_lock); 388 } 389 if (ipm) { 390 struct sunrpc_net *sn; 391 392 sn = net_generic(xprt->xpt_net, sunrpc_net_id); 393 cache_put(&ipm->h, sn->ip_map_cache); 394 } 395 } 396 397 void 398 svcauth_unix_info_release(struct svc_xprt *xpt) 399 { 400 struct ip_map *ipm; 401 402 ipm = xpt->xpt_auth_cache; 403 if (ipm != NULL) { 404 struct sunrpc_net *sn; 405 406 sn = net_generic(xpt->xpt_net, sunrpc_net_id); 407 cache_put(&ipm->h, sn->ip_map_cache); 408 } 409 } 410 411 /**************************************************************************** 412 * auth.unix.gid cache 413 * simple cache to map a UID to a list of GIDs 414 * because AUTH_UNIX aka AUTH_SYS has a max of UNX_NGROUPS 415 */ 416 #define GID_HASHBITS 8 417 #define GID_HASHMAX (1<<GID_HASHBITS) 418 419 struct unix_gid { 420 struct cache_head h; 421 kuid_t uid; 422 struct group_info *gi; 423 struct rcu_head rcu; 424 }; 425 426 static int unix_gid_hash(kuid_t uid) 427 { 428 return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS); 429 } 430 431 static void unix_gid_put(struct kref *kref) 432 { 433 struct cache_head *item = container_of(kref, struct cache_head, ref); 434 struct unix_gid *ug = container_of(item, struct unix_gid, h); 435 if (test_bit(CACHE_VALID, &item->flags) && 436 !test_bit(CACHE_NEGATIVE, &item->flags)) 437 put_group_info(ug->gi); 438 kfree_rcu(ug, rcu); 439 } 440 441 static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew) 442 { 443 struct unix_gid *orig = container_of(corig, struct unix_gid, h); 444 struct unix_gid *new = container_of(cnew, struct unix_gid, h); 445 return uid_eq(orig->uid, new->uid); 446 } 447 static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem) 448 { 449 struct unix_gid *new = container_of(cnew, struct unix_gid, h); 450 struct unix_gid *item = container_of(citem, struct unix_gid, h); 451 new->uid = item->uid; 452 } 453 static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem) 454 { 455 struct unix_gid *new = container_of(cnew, struct unix_gid, h); 456 struct unix_gid *item = container_of(citem, struct unix_gid, h); 457 458 get_group_info(item->gi); 459 new->gi = item->gi; 460 } 461 static struct cache_head *unix_gid_alloc(void) 462 { 463 struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL); 464 if (g) 465 return &g->h; 466 else 467 return NULL; 468 } 469 470 static void unix_gid_request(struct cache_detail *cd, 471 struct cache_head *h, 472 char **bpp, int *blen) 473 { 474 char tuid[20]; 475 struct unix_gid *ug = container_of(h, struct unix_gid, h); 476 477 snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid)); 478 qword_add(bpp, blen, tuid); 479 (*bpp)[-1] = '\n'; 480 } 481 482 static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid); 483 484 static int unix_gid_parse(struct cache_detail *cd, 485 char *mesg, int mlen) 486 { 487 /* uid expiry Ngid gid0 gid1 ... gidN-1 */ 488 int id; 489 kuid_t uid; 490 int gids; 491 int rv; 492 int i; 493 int err; 494 time64_t expiry; 495 struct unix_gid ug, *ugp; 496 497 if (mesg[mlen - 1] != '\n') 498 return -EINVAL; 499 mesg[mlen-1] = 0; 500 501 rv = get_int(&mesg, &id); 502 if (rv) 503 return -EINVAL; 504 uid = make_kuid(current_user_ns(), id); 505 ug.uid = uid; 506 507 expiry = get_expiry(&mesg); 508 if (expiry == 0) 509 return -EINVAL; 510 511 rv = get_int(&mesg, &gids); 512 if (rv || gids < 0 || gids > 8192) 513 return -EINVAL; 514 515 ug.gi = groups_alloc(gids); 516 if (!ug.gi) 517 return -ENOMEM; 518 519 for (i = 0 ; i < gids ; i++) { 520 int gid; 521 kgid_t kgid; 522 rv = get_int(&mesg, &gid); 523 err = -EINVAL; 524 if (rv) 525 goto out; 526 kgid = make_kgid(current_user_ns(), gid); 527 if (!gid_valid(kgid)) 528 goto out; 529 ug.gi->gid[i] = kgid; 530 } 531 532 groups_sort(ug.gi); 533 ugp = unix_gid_lookup(cd, uid); 534 if (ugp) { 535 struct cache_head *ch; 536 ug.h.flags = 0; 537 ug.h.expiry_time = expiry; 538 ch = sunrpc_cache_update(cd, 539 &ug.h, &ugp->h, 540 unix_gid_hash(uid)); 541 if (!ch) 542 err = -ENOMEM; 543 else { 544 err = 0; 545 cache_put(ch, cd); 546 } 547 } else 548 err = -ENOMEM; 549 out: 550 if (ug.gi) 551 put_group_info(ug.gi); 552 return err; 553 } 554 555 static int unix_gid_show(struct seq_file *m, 556 struct cache_detail *cd, 557 struct cache_head *h) 558 { 559 struct user_namespace *user_ns = m->file->f_cred->user_ns; 560 struct unix_gid *ug; 561 int i; 562 int glen; 563 564 if (h == NULL) { 565 seq_puts(m, "#uid cnt: gids...\n"); 566 return 0; 567 } 568 ug = container_of(h, struct unix_gid, h); 569 if (test_bit(CACHE_VALID, &h->flags) && 570 !test_bit(CACHE_NEGATIVE, &h->flags)) 571 glen = ug->gi->ngroups; 572 else 573 glen = 0; 574 575 seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen); 576 for (i = 0; i < glen; i++) 577 seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i])); 578 seq_printf(m, "\n"); 579 return 0; 580 } 581 582 static const struct cache_detail unix_gid_cache_template = { 583 .owner = THIS_MODULE, 584 .hash_size = GID_HASHMAX, 585 .name = "auth.unix.gid", 586 .cache_put = unix_gid_put, 587 .cache_request = unix_gid_request, 588 .cache_parse = unix_gid_parse, 589 .cache_show = unix_gid_show, 590 .match = unix_gid_match, 591 .init = unix_gid_init, 592 .update = unix_gid_update, 593 .alloc = unix_gid_alloc, 594 }; 595 596 int unix_gid_cache_create(struct net *net) 597 { 598 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 599 struct cache_detail *cd; 600 int err; 601 602 cd = cache_create_net(&unix_gid_cache_template, net); 603 if (IS_ERR(cd)) 604 return PTR_ERR(cd); 605 err = cache_register_net(cd, net); 606 if (err) { 607 cache_destroy_net(cd, net); 608 return err; 609 } 610 sn->unix_gid_cache = cd; 611 return 0; 612 } 613 614 void unix_gid_cache_destroy(struct net *net) 615 { 616 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 617 struct cache_detail *cd = sn->unix_gid_cache; 618 619 sn->unix_gid_cache = NULL; 620 cache_purge(cd); 621 cache_unregister_net(cd, net); 622 cache_destroy_net(cd, net); 623 } 624 625 static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid) 626 { 627 struct unix_gid ug; 628 struct cache_head *ch; 629 630 ug.uid = uid; 631 ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid)); 632 if (ch) 633 return container_of(ch, struct unix_gid, h); 634 else 635 return NULL; 636 } 637 638 static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp) 639 { 640 struct unix_gid *ug; 641 struct group_info *gi; 642 int ret; 643 struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, 644 sunrpc_net_id); 645 646 ug = unix_gid_lookup(sn->unix_gid_cache, uid); 647 if (!ug) 648 return ERR_PTR(-EAGAIN); 649 ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle); 650 switch (ret) { 651 case -ENOENT: 652 return ERR_PTR(-ENOENT); 653 case -ETIMEDOUT: 654 return ERR_PTR(-ESHUTDOWN); 655 case 0: 656 gi = get_group_info(ug->gi); 657 cache_put(&ug->h, sn->unix_gid_cache); 658 return gi; 659 default: 660 return ERR_PTR(-EAGAIN); 661 } 662 } 663 664 int 665 svcauth_unix_set_client(struct svc_rqst *rqstp) 666 { 667 struct sockaddr_in *sin; 668 struct sockaddr_in6 *sin6, sin6_storage; 669 struct ip_map *ipm; 670 struct group_info *gi; 671 struct svc_cred *cred = &rqstp->rq_cred; 672 struct svc_xprt *xprt = rqstp->rq_xprt; 673 struct net *net = xprt->xpt_net; 674 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 675 676 switch (rqstp->rq_addr.ss_family) { 677 case AF_INET: 678 sin = svc_addr_in(rqstp); 679 sin6 = &sin6_storage; 680 ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr); 681 break; 682 case AF_INET6: 683 sin6 = svc_addr_in6(rqstp); 684 break; 685 default: 686 BUG(); 687 } 688 689 rqstp->rq_client = NULL; 690 if (rqstp->rq_proc == 0) 691 return SVC_OK; 692 693 ipm = ip_map_cached_get(xprt); 694 if (ipm == NULL) 695 ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class, 696 &sin6->sin6_addr); 697 698 if (ipm == NULL) 699 return SVC_DENIED; 700 701 switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) { 702 default: 703 BUG(); 704 case -ETIMEDOUT: 705 return SVC_CLOSE; 706 case -EAGAIN: 707 return SVC_DROP; 708 case -ENOENT: 709 return SVC_DENIED; 710 case 0: 711 rqstp->rq_client = &ipm->m_client->h; 712 kref_get(&rqstp->rq_client->ref); 713 ip_map_cached_put(xprt, ipm); 714 break; 715 } 716 717 gi = unix_gid_find(cred->cr_uid, rqstp); 718 switch (PTR_ERR(gi)) { 719 case -EAGAIN: 720 return SVC_DROP; 721 case -ESHUTDOWN: 722 return SVC_CLOSE; 723 case -ENOENT: 724 break; 725 default: 726 put_group_info(cred->cr_group_info); 727 cred->cr_group_info = gi; 728 } 729 return SVC_OK; 730 } 731 732 EXPORT_SYMBOL_GPL(svcauth_unix_set_client); 733 734 static int 735 svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp) 736 { 737 struct kvec *argv = &rqstp->rq_arg.head[0]; 738 struct kvec *resv = &rqstp->rq_res.head[0]; 739 struct svc_cred *cred = &rqstp->rq_cred; 740 741 if (argv->iov_len < 3*4) 742 return SVC_GARBAGE; 743 744 if (svc_getu32(argv) != 0) { 745 dprintk("svc: bad null cred\n"); 746 *authp = rpc_autherr_badcred; 747 return SVC_DENIED; 748 } 749 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { 750 dprintk("svc: bad null verf\n"); 751 *authp = rpc_autherr_badverf; 752 return SVC_DENIED; 753 } 754 755 /* Signal that mapping to nobody uid/gid is required */ 756 cred->cr_uid = INVALID_UID; 757 cred->cr_gid = INVALID_GID; 758 cred->cr_group_info = groups_alloc(0); 759 if (cred->cr_group_info == NULL) 760 return SVC_CLOSE; /* kmalloc failure - client must retry */ 761 762 /* Put NULL verifier */ 763 svc_putnl(resv, RPC_AUTH_NULL); 764 svc_putnl(resv, 0); 765 766 rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL; 767 return SVC_OK; 768 } 769 770 static int 771 svcauth_null_release(struct svc_rqst *rqstp) 772 { 773 if (rqstp->rq_client) 774 auth_domain_put(rqstp->rq_client); 775 rqstp->rq_client = NULL; 776 if (rqstp->rq_cred.cr_group_info) 777 put_group_info(rqstp->rq_cred.cr_group_info); 778 rqstp->rq_cred.cr_group_info = NULL; 779 780 return 0; /* don't drop */ 781 } 782 783 784 struct auth_ops svcauth_null = { 785 .name = "null", 786 .owner = THIS_MODULE, 787 .flavour = RPC_AUTH_NULL, 788 .accept = svcauth_null_accept, 789 .release = svcauth_null_release, 790 .set_client = svcauth_unix_set_client, 791 }; 792 793 794 static int 795 svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) 796 { 797 struct kvec *argv = &rqstp->rq_arg.head[0]; 798 struct kvec *resv = &rqstp->rq_res.head[0]; 799 struct svc_cred *cred = &rqstp->rq_cred; 800 struct user_namespace *userns; 801 u32 slen, i; 802 int len = argv->iov_len; 803 804 if ((len -= 3*4) < 0) 805 return SVC_GARBAGE; 806 807 svc_getu32(argv); /* length */ 808 svc_getu32(argv); /* time stamp */ 809 slen = XDR_QUADLEN(svc_getnl(argv)); /* machname length */ 810 if (slen > 64 || (len -= (slen + 3)*4) < 0) 811 goto badcred; 812 argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */ 813 argv->iov_len -= slen*4; 814 /* 815 * Note: we skip uid_valid()/gid_valid() checks here for 816 * backwards compatibility with clients that use -1 id's. 817 * Instead, -1 uid or gid is later mapped to the 818 * (export-specific) anonymous id by nfsd_setuser. 819 * Supplementary gid's will be left alone. 820 */ 821 userns = (rqstp->rq_xprt && rqstp->rq_xprt->xpt_cred) ? 822 rqstp->rq_xprt->xpt_cred->user_ns : &init_user_ns; 823 cred->cr_uid = make_kuid(userns, svc_getnl(argv)); /* uid */ 824 cred->cr_gid = make_kgid(userns, svc_getnl(argv)); /* gid */ 825 slen = svc_getnl(argv); /* gids length */ 826 if (slen > UNX_NGROUPS || (len -= (slen + 2)*4) < 0) 827 goto badcred; 828 cred->cr_group_info = groups_alloc(slen); 829 if (cred->cr_group_info == NULL) 830 return SVC_CLOSE; 831 for (i = 0; i < slen; i++) { 832 kgid_t kgid = make_kgid(userns, svc_getnl(argv)); 833 cred->cr_group_info->gid[i] = kgid; 834 } 835 groups_sort(cred->cr_group_info); 836 if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { 837 *authp = rpc_autherr_badverf; 838 return SVC_DENIED; 839 } 840 841 /* Put NULL verifier */ 842 svc_putnl(resv, RPC_AUTH_NULL); 843 svc_putnl(resv, 0); 844 845 rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX; 846 return SVC_OK; 847 848 badcred: 849 *authp = rpc_autherr_badcred; 850 return SVC_DENIED; 851 } 852 853 static int 854 svcauth_unix_release(struct svc_rqst *rqstp) 855 { 856 /* Verifier (such as it is) is already in place. 857 */ 858 if (rqstp->rq_client) 859 auth_domain_put(rqstp->rq_client); 860 rqstp->rq_client = NULL; 861 if (rqstp->rq_cred.cr_group_info) 862 put_group_info(rqstp->rq_cred.cr_group_info); 863 rqstp->rq_cred.cr_group_info = NULL; 864 865 return 0; 866 } 867 868 869 struct auth_ops svcauth_unix = { 870 .name = "unix", 871 .owner = THIS_MODULE, 872 .flavour = RPC_AUTH_UNIX, 873 .accept = svcauth_unix_accept, 874 .release = svcauth_unix_release, 875 .domain_release = svcauth_unix_domain_release, 876 .set_client = svcauth_unix_set_client, 877 }; 878 879 static const struct cache_detail ip_map_cache_template = { 880 .owner = THIS_MODULE, 881 .hash_size = IP_HASHMAX, 882 .name = "auth.unix.ip", 883 .cache_put = ip_map_put, 884 .cache_request = ip_map_request, 885 .cache_parse = ip_map_parse, 886 .cache_show = ip_map_show, 887 .match = ip_map_match, 888 .init = ip_map_init, 889 .update = update, 890 .alloc = ip_map_alloc, 891 }; 892 893 int ip_map_cache_create(struct net *net) 894 { 895 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 896 struct cache_detail *cd; 897 int err; 898 899 cd = cache_create_net(&ip_map_cache_template, net); 900 if (IS_ERR(cd)) 901 return PTR_ERR(cd); 902 err = cache_register_net(cd, net); 903 if (err) { 904 cache_destroy_net(cd, net); 905 return err; 906 } 907 sn->ip_map_cache = cd; 908 return 0; 909 } 910 911 void ip_map_cache_destroy(struct net *net) 912 { 913 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 914 struct cache_detail *cd = sn->ip_map_cache; 915 916 sn->ip_map_cache = NULL; 917 cache_purge(cd); 918 cache_unregister_net(cd, net); 919 cache_destroy_net(cd, net); 920 } 921