#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#include <linux/user_namespace.h>
#define RPCDBG_FACILITY RPCDBG_AUTH


#include "netns.h"

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2). i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */


struct unix_domain {
        struct auth_domain h;
        /* other stuff later */
};

extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;

static void svcauth_unix_domain_release_rcu(struct rcu_head *head)
{
        struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
        struct unix_domain *ud = container_of(dom, struct unix_domain, h);

        kfree(dom->name);
        kfree(ud);
}

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
        call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu);
}

struct auth_domain *unix_domain_find(char *name)
{
        struct auth_domain *rv;
        struct unix_domain *new = NULL;

        rv = auth_domain_find(name);
        while (1) {
                if (rv) {
                        if (new && rv != &new->h)
                                svcauth_unix_domain_release(&new->h);

                        if (rv->flavour != &svcauth_unix) {
                                auth_domain_put(rv);
                                return NULL;
                        }
                        return rv;
                }

                new = kmalloc(sizeof(*new), GFP_KERNEL);
                if (new == NULL)
                        return NULL;
                kref_init(&new->h.ref);
                new->h.name = kstrdup(name, GFP_KERNEL);
                if (new->h.name == NULL) {
                        kfree(new);
                        return NULL;
                }
                new->h.flavour = &svcauth_unix;
                rv = auth_domain_lookup(name, &new->h);
        }
}
EXPORT_SYMBOL_GPL(unix_domain_find);


/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define IP_HASHBITS 8
#define IP_HASHMAX  (1<<IP_HASHBITS)

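/*
 * Each ip_map entry maps one (class, address) pair to a unix_domain.
 * IPv4 addresses are stored as v4-mapped IPv6 addresses so that a
 * single cache serves both address families.
 */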
"nfsd" */ 98 struct in6_addr m_addr; 99 struct unix_domain *m_client; 100 struct rcu_head m_rcu; 101 }; 102 103 static void ip_map_put(struct kref *kref) 104 { 105 struct cache_head *item = container_of(kref, struct cache_head, ref); 106 struct ip_map *im = container_of(item, struct ip_map,h); 107 108 if (test_bit(CACHE_VALID, &item->flags) && 109 !test_bit(CACHE_NEGATIVE, &item->flags)) 110 auth_domain_put(&im->m_client->h); 111 kfree_rcu(im, m_rcu); 112 } 113 114 static inline int hash_ip6(const struct in6_addr *ip) 115 { 116 return hash_32(ipv6_addr_hash(ip), IP_HASHBITS); 117 } 118 static int ip_map_match(struct cache_head *corig, struct cache_head *cnew) 119 { 120 struct ip_map *orig = container_of(corig, struct ip_map, h); 121 struct ip_map *new = container_of(cnew, struct ip_map, h); 122 return strcmp(orig->m_class, new->m_class) == 0 && 123 ipv6_addr_equal(&orig->m_addr, &new->m_addr); 124 } 125 static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) 126 { 127 struct ip_map *new = container_of(cnew, struct ip_map, h); 128 struct ip_map *item = container_of(citem, struct ip_map, h); 129 130 strcpy(new->m_class, item->m_class); 131 new->m_addr = item->m_addr; 132 } 133 static void update(struct cache_head *cnew, struct cache_head *citem) 134 { 135 struct ip_map *new = container_of(cnew, struct ip_map, h); 136 struct ip_map *item = container_of(citem, struct ip_map, h); 137 138 kref_get(&item->m_client->h.ref); 139 new->m_client = item->m_client; 140 } 141 static struct cache_head *ip_map_alloc(void) 142 { 143 struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL); 144 if (i) 145 return &i->h; 146 else 147 return NULL; 148 } 149 150 static void ip_map_request(struct cache_detail *cd, 151 struct cache_head *h, 152 char **bpp, int *blen) 153 { 154 char text_addr[40]; 155 struct ip_map *im = container_of(h, struct ip_map, h); 156 157 if (ipv6_addr_v4mapped(&(im->m_addr))) { 158 snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]); 159 } else { 160 snprintf(text_addr, 40, "%pI6", &im->m_addr); 161 } 162 qword_add(bpp, blen, im->m_class); 163 qword_add(bpp, blen, text_addr); 164 (*bpp)[-1] = '\n'; 165 } 166 167 static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr); 168 static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry); 169 170 static int ip_map_parse(struct cache_detail *cd, 171 char *mesg, int mlen) 172 { 173 /* class ipaddress [domainname] */ 174 /* should be safe just to use the start of the input buffer 175 * for scratch: */ 176 char *buf = mesg; 177 int len; 178 char class[8]; 179 union { 180 struct sockaddr sa; 181 struct sockaddr_in s4; 182 struct sockaddr_in6 s6; 183 } address; 184 struct sockaddr_in6 sin6; 185 int err; 186 187 struct ip_map *ipmp; 188 struct auth_domain *dom; 189 time_t expiry; 190 191 if (mesg[mlen-1] != '\n') 192 return -EINVAL; 193 mesg[mlen-1] = 0; 194 195 /* class */ 196 len = qword_get(&mesg, class, sizeof(class)); 197 if (len <= 0) return -EINVAL; 198 199 /* ip address */ 200 len = qword_get(&mesg, buf, mlen); 201 if (len <= 0) return -EINVAL; 202 203 if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0) 204 return -EINVAL; 205 switch (address.sa.sa_family) { 206 case AF_INET: 207 /* Form a mapped IPv4 address in sin6 */ 208 sin6.sin6_family = AF_INET6; 209 ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr, 210 &sin6.sin6_addr); 211 break; 212 #if IS_ENABLED(CONFIG_IPV6) 213 case AF_INET6: 214 memcpy(&sin6, 
static int ip_map_parse(struct cache_detail *cd,
                        char *mesg, int mlen)
{
        /* class ipaddress [domainname] */
        /* should be safe just to use the start of the input buffer
         * for scratch: */
        char *buf = mesg;
        int len;
        char class[8];
        union {
                struct sockaddr sa;
                struct sockaddr_in s4;
                struct sockaddr_in6 s6;
        } address;
        struct sockaddr_in6 sin6;
        int err;

        struct ip_map *ipmp;
        struct auth_domain *dom;
        time_t expiry;

        if (mesg[mlen-1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;

        /* class */
        len = qword_get(&mesg, class, sizeof(class));
        if (len <= 0) return -EINVAL;

        /* ip address */
        len = qword_get(&mesg, buf, mlen);
        if (len <= 0) return -EINVAL;

        if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0)
                return -EINVAL;
        switch (address.sa.sa_family) {
        case AF_INET:
                /* Form a mapped IPv4 address in sin6 */
                sin6.sin6_family = AF_INET6;
                ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
                                       &sin6.sin6_addr);
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
                memcpy(&sin6, &address.s6, sizeof(sin6));
                break;
#endif
        default:
                return -EINVAL;
        }

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        /* domainname, or empty for NEGATIVE */
        len = qword_get(&mesg, buf, mlen);
        if (len < 0) return -EINVAL;

        if (len) {
                dom = unix_domain_find(buf);
                if (dom == NULL)
                        return -ENOENT;
        } else
                dom = NULL;

        /* IPv6 scope IDs are ignored for now */
        ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
        if (ipmp) {
                err = __ip_map_update(cd, ipmp,
                                      container_of(dom, struct unix_domain, h),
                                      expiry);
        } else
                err = -ENOMEM;

        if (dom)
                auth_domain_put(dom);

        cache_flush();
        return err;
}

static int ip_map_show(struct seq_file *m,
                       struct cache_detail *cd,
                       struct cache_head *h)
{
        struct ip_map *im;
        struct in6_addr addr;
        char *dom = "-no-domain-";

        if (h == NULL) {
                seq_puts(m, "#class IP domain\n");
                return 0;
        }
        im = container_of(h, struct ip_map, h);
        /* class addr domain */
        addr = im->m_addr;

        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                dom = im->m_client->h.name;

        if (ipv6_addr_v4mapped(&addr)) {
                seq_printf(m, "%s %pI4 %s\n",
                           im->m_class, &addr.s6_addr32[3], dom);
        } else {
                seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
        }
        return 0;
}


static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
                                      struct in6_addr *addr)
{
        struct ip_map ip;
        struct cache_head *ch;

        strcpy(ip.m_class, class);
        ip.m_addr = *addr;
        ch = sunrpc_cache_lookup_rcu(cd, &ip.h,
                                     hash_str(class, IP_HASHBITS) ^
                                     hash_ip6(addr));

        if (ch)
                return container_of(ch, struct ip_map, h);
        else
                return NULL;
}

static inline struct ip_map *ip_map_lookup(struct net *net, char *class,
                                           struct in6_addr *addr)
{
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        return __ip_map_lookup(sn->ip_map_cache, class, addr);
}

static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
                           struct unix_domain *udom, time_t expiry)
{
        struct ip_map ip;
        struct cache_head *ch;

        ip.m_client = udom;
        ip.h.flags = 0;
        if (!udom)
                set_bit(CACHE_NEGATIVE, &ip.h.flags);
        ip.h.expiry_time = expiry;
        ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
                                 hash_str(ipm->m_class, IP_HASHBITS) ^
                                 hash_ip6(&ipm->m_addr));
        if (!ch)
                return -ENOMEM;
        cache_put(ch, cd);
        return 0;
}

static inline int ip_map_update(struct net *net, struct ip_map *ipm,
                                struct unix_domain *udom, time_t expiry)
{
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
}

void svcauth_unix_purge(struct net *net)
{
        struct sunrpc_net *sn;

        sn = net_generic(net, sunrpc_net_id);
        cache_purge(sn->ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);

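/*
 * A transport may hold one reference to an ip_map in xpt_auth_cache so
 * that requests arriving on the same connection can skip the hashed
 * cache lookup.  The helpers below take and release that cached
 * reference under xpt_lock, dropping it if the entry has expired.
 */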
static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
        struct ip_map *ipm = NULL;
        struct sunrpc_net *sn;

        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                ipm = xprt->xpt_auth_cache;
                if (ipm != NULL) {
                        sn = net_generic(xprt->xpt_net, sunrpc_net_id);
                        if (cache_is_expired(sn->ip_map_cache, &ipm->h)) {
                                /*
                                 * The entry has been invalidated since it was
                                 * remembered, e.g. by a second mount from the
                                 * same IP address.
                                 */
                                xprt->xpt_auth_cache = NULL;
                                spin_unlock(&xprt->xpt_lock);
                                cache_put(&ipm->h, sn->ip_map_cache);
                                return NULL;
                        }
                        cache_get(&ipm->h);
                }
                spin_unlock(&xprt->xpt_lock);
        }
        return ipm;
}

static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
        if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
                spin_lock(&xprt->xpt_lock);
                if (xprt->xpt_auth_cache == NULL) {
                        /* newly cached, keep the reference */
                        xprt->xpt_auth_cache = ipm;
                        ipm = NULL;
                }
                spin_unlock(&xprt->xpt_lock);
        }
        if (ipm) {
                struct sunrpc_net *sn;

                sn = net_generic(xprt->xpt_net, sunrpc_net_id);
                cache_put(&ipm->h, sn->ip_map_cache);
        }
}

void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
        struct ip_map *ipm;

        ipm = xpt->xpt_auth_cache;
        if (ipm != NULL) {
                struct sunrpc_net *sn;

                sn = net_generic(xpt->xpt_net, sunrpc_net_id);
                cache_put(&ipm->h, sn->ip_map_cache);
        }
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of UNX_NGROUPS
 */
#define GID_HASHBITS 8
#define GID_HASHMAX  (1<<GID_HASHBITS)

struct unix_gid {
        struct cache_head h;
        kuid_t uid;
        struct group_info *gi;
        struct rcu_head rcu;
};

static int unix_gid_hash(kuid_t uid)
{
        return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
}

static void unix_gid_put(struct kref *kref)
{
        struct cache_head *item = container_of(kref, struct cache_head, ref);
        struct unix_gid *ug = container_of(item, struct unix_gid, h);
        if (test_bit(CACHE_VALID, &item->flags) &&
            !test_bit(CACHE_NEGATIVE, &item->flags))
                put_group_info(ug->gi);
        kfree_rcu(ug, rcu);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
        struct unix_gid *orig = container_of(corig, struct unix_gid, h);
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        return uid_eq(orig->uid, new->uid);
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);
        new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
        struct unix_gid *new = container_of(cnew, struct unix_gid, h);
        struct unix_gid *item = container_of(citem, struct unix_gid, h);

        get_group_info(item->gi);
        new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
        struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
        if (g)
                return &g->h;
        else
                return NULL;
}

static void unix_gid_request(struct cache_detail *cd,
                             struct cache_head *h,
                             char **bpp, int *blen)
{
        char tuid[20];
        struct unix_gid *ug = container_of(h, struct unix_gid, h);

        snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid));
        qword_add(bpp, blen, tuid);
        (*bpp)[-1] = '\n';
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);

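/*
 * Parse a line written to the auth.unix.gid channel: a uid, an expiry
 * time, the number of gids, then the gids themselves.  For example
 * (hypothetical values only):
 *
 *      1000 1234567890 3 100 1001 1002
 */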
static int unix_gid_parse(struct cache_detail *cd,
                          char *mesg, int mlen)
{
        /* uid expiry Ngid gid0 gid1 ... gidN-1 */
        int id;
        kuid_t uid;
        int gids;
        int rv;
        int i;
        int err;
        time_t expiry;
        struct unix_gid ug, *ugp;

        if (mesg[mlen - 1] != '\n')
                return -EINVAL;
        mesg[mlen-1] = 0;

        rv = get_int(&mesg, &id);
        if (rv)
                return -EINVAL;
        uid = make_kuid(&init_user_ns, id);
        ug.uid = uid;

        expiry = get_expiry(&mesg);
        if (expiry == 0)
                return -EINVAL;

        rv = get_int(&mesg, &gids);
        if (rv || gids < 0 || gids > 8192)
                return -EINVAL;

        ug.gi = groups_alloc(gids);
        if (!ug.gi)
                return -ENOMEM;

        for (i = 0 ; i < gids ; i++) {
                int gid;
                kgid_t kgid;
                rv = get_int(&mesg, &gid);
                err = -EINVAL;
                if (rv)
                        goto out;
                kgid = make_kgid(&init_user_ns, gid);
                if (!gid_valid(kgid))
                        goto out;
                ug.gi->gid[i] = kgid;
        }

        groups_sort(ug.gi);
        ugp = unix_gid_lookup(cd, uid);
        if (ugp) {
                struct cache_head *ch;
                ug.h.flags = 0;
                ug.h.expiry_time = expiry;
                ch = sunrpc_cache_update(cd,
                                         &ug.h, &ugp->h,
                                         unix_gid_hash(uid));
                if (!ch)
                        err = -ENOMEM;
                else {
                        err = 0;
                        cache_put(ch, cd);
                }
        } else
                err = -ENOMEM;
out:
        if (ug.gi)
                put_group_info(ug.gi);
        return err;
}

static int unix_gid_show(struct seq_file *m,
                         struct cache_detail *cd,
                         struct cache_head *h)
{
        struct user_namespace *user_ns = &init_user_ns;
        struct unix_gid *ug;
        int i;
        int glen;

        if (h == NULL) {
                seq_puts(m, "#uid cnt: gids...\n");
                return 0;
        }
        ug = container_of(h, struct unix_gid, h);
        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                glen = ug->gi->ngroups;
        else
                glen = 0;

        seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
        for (i = 0; i < glen; i++)
                seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i]));
        seq_printf(m, "\n");
        return 0;
}

static const struct cache_detail unix_gid_cache_template = {
        .owner = THIS_MODULE,
        .hash_size = GID_HASHMAX,
        .name = "auth.unix.gid",
        .cache_put = unix_gid_put,
        .cache_request = unix_gid_request,
        .cache_parse = unix_gid_parse,
        .cache_show = unix_gid_show,
        .match = unix_gid_match,
        .init = unix_gid_init,
        .update = unix_gid_update,
        .alloc = unix_gid_alloc,
};

int unix_gid_cache_create(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd;
        int err;

        cd = cache_create_net(&unix_gid_cache_template, net);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        err = cache_register_net(cd, net);
        if (err) {
                cache_destroy_net(cd, net);
                return err;
        }
        sn->unix_gid_cache = cd;
        return 0;
}

void unix_gid_cache_destroy(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd = sn->unix_gid_cache;

        sn->unix_gid_cache = NULL;
        cache_purge(cd);
        cache_unregister_net(cd, net);
        cache_destroy_net(cd, net);
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
{
        struct unix_gid ug;
        struct cache_head *ch;

        ug.uid = uid;
        ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid));
        if (ch)
                return container_of(ch, struct unix_gid, h);
        else
                return NULL;
}

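/*
 * Fetch the supplementary group list for @uid from the auth.unix.gid
 * cache.  Returns a referenced group_info on success; otherwise an
 * ERR_PTR: -EAGAIN if the entry is not usable yet (the caller drops
 * the request), -ENOENT for a negative entry, or -ESHUTDOWN if the
 * upcall timed out (the caller closes the connection).
 */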
static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
{
        struct unix_gid *ug;
        struct group_info *gi;
        int ret;
        struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net,
                                            sunrpc_net_id);

        ug = unix_gid_lookup(sn->unix_gid_cache, uid);
        if (!ug)
                return ERR_PTR(-EAGAIN);
        ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle);
        switch (ret) {
        case -ENOENT:
                return ERR_PTR(-ENOENT);
        case -ETIMEDOUT:
                return ERR_PTR(-ESHUTDOWN);
        case 0:
                gi = get_group_info(ug->gi);
                cache_put(&ug->h, sn->unix_gid_cache);
                return gi;
        default:
                return ERR_PTR(-EAGAIN);
        }
}

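/*
 * Map the client's IP address to an auth_domain for export access
 * checks and, when the auth.unix.gid cache has an entry for the
 * credential's uid, replace the supplied group list with it.
 * Returns SVC_OK on success, SVC_DENIED if no domain matches,
 * SVC_DROP while a cache upcall is pending, and SVC_CLOSE if a
 * cache upcall timed out.
 */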
int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
        struct sockaddr_in *sin;
        struct sockaddr_in6 *sin6, sin6_storage;
        struct ip_map *ipm;
        struct group_info *gi;
        struct svc_cred *cred = &rqstp->rq_cred;
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct net *net = xprt->xpt_net;
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

        switch (rqstp->rq_addr.ss_family) {
        case AF_INET:
                sin = svc_addr_in(rqstp);
                sin6 = &sin6_storage;
                ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
                break;
        case AF_INET6:
                sin6 = svc_addr_in6(rqstp);
                break;
        default:
                BUG();
        }

        rqstp->rq_client = NULL;
        if (rqstp->rq_proc == 0)
                return SVC_OK;

        ipm = ip_map_cached_get(xprt);
        if (ipm == NULL)
                ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
                                      &sin6->sin6_addr);

        if (ipm == NULL)
                return SVC_DENIED;

        switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
        default:
                BUG();
        case -ETIMEDOUT:
                return SVC_CLOSE;
        case -EAGAIN:
                return SVC_DROP;
        case -ENOENT:
                return SVC_DENIED;
        case 0:
                rqstp->rq_client = &ipm->m_client->h;
                kref_get(&rqstp->rq_client->ref);
                ip_map_cached_put(xprt, ipm);
                break;
        }

        gi = unix_gid_find(cred->cr_uid, rqstp);
        switch (PTR_ERR(gi)) {
        case -EAGAIN:
                return SVC_DROP;
        case -ESHUTDOWN:
                return SVC_CLOSE;
        case -ENOENT:
                break;
        default:
                put_group_info(cred->cr_group_info);
                cred->cr_group_info = gi;
        }
        return SVC_OK;
}

EXPORT_SYMBOL_GPL(svcauth_unix_set_client);

static int
svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;

        if (argv->iov_len < 3*4)
                return SVC_GARBAGE;

        if (svc_getu32(argv) != 0) {
                dprintk("svc: bad null cred\n");
                *authp = rpc_autherr_badcred;
                return SVC_DENIED;
        }
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                dprintk("svc: bad null verf\n");
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Signal that mapping to nobody uid/gid is required */
        cred->cr_uid = INVALID_UID;
        cred->cr_gid = INVALID_GID;
        cred->cr_group_info = groups_alloc(0);
        if (cred->cr_group_info == NULL)
                return SVC_CLOSE; /* kmalloc failure - client must retry */

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
        return SVC_OK;
}

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0; /* don't drop */
}


struct auth_ops svcauth_null = {
        .name = "null",
        .owner = THIS_MODULE,
        .flavour = RPC_AUTH_NULL,
        .accept = svcauth_null_accept,
        .release = svcauth_null_release,
        .set_client = svcauth_unix_set_client,
};


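/*
 * Decode an AUTH_UNIX (AUTH_SYS) credential: stamp, machine name
 * (which is skipped), uid, gid, and up to UNX_NGROUPS supplementary
 * gids.  The accompanying verifier must be AUTH_NULL.
 */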
static int
svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
{
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct svc_cred *cred = &rqstp->rq_cred;
        u32 slen, i;
        int len = argv->iov_len;

        if ((len -= 3*4) < 0)
                return SVC_GARBAGE;

        svc_getu32(argv); /* length */
        svc_getu32(argv); /* time stamp */
        slen = XDR_QUADLEN(svc_getnl(argv)); /* machname length */
        if (slen > 64 || (len -= (slen + 3)*4) < 0)
                goto badcred;
        argv->iov_base = (void*)((__be32*)argv->iov_base + slen); /* skip machname */
        argv->iov_len -= slen*4;
        /*
         * Note: we skip uid_valid()/gid_valid() checks here for
         * backwards compatibility with clients that use -1 id's.
         * Instead, -1 uid or gid is later mapped to the
         * (export-specific) anonymous id by nfsd_setuser.
         * Supplementary gid's will be left alone.
         */
        cred->cr_uid = make_kuid(&init_user_ns, svc_getnl(argv)); /* uid */
        cred->cr_gid = make_kgid(&init_user_ns, svc_getnl(argv)); /* gid */
        slen = svc_getnl(argv); /* gids length */
        if (slen > UNX_NGROUPS || (len -= (slen + 2)*4) < 0)
                goto badcred;
        cred->cr_group_info = groups_alloc(slen);
        if (cred->cr_group_info == NULL)
                return SVC_CLOSE;
        for (i = 0; i < slen; i++) {
                kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv));
                cred->cr_group_info->gid[i] = kgid;
        }
        groups_sort(cred->cr_group_info);
        if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
                *authp = rpc_autherr_badverf;
                return SVC_DENIED;
        }

        /* Put NULL verifier */
        svc_putnl(resv, RPC_AUTH_NULL);
        svc_putnl(resv, 0);

        rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
        return SVC_OK;

badcred:
        *authp = rpc_autherr_badcred;
        return SVC_DENIED;
}

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
        /* Verifier (such as it is) is already in place.
         */
        if (rqstp->rq_client)
                auth_domain_put(rqstp->rq_client);
        rqstp->rq_client = NULL;
        if (rqstp->rq_cred.cr_group_info)
                put_group_info(rqstp->rq_cred.cr_group_info);
        rqstp->rq_cred.cr_group_info = NULL;

        return 0;
}


struct auth_ops svcauth_unix = {
        .name = "unix",
        .owner = THIS_MODULE,
        .flavour = RPC_AUTH_UNIX,
        .accept = svcauth_unix_accept,
        .release = svcauth_unix_release,
        .domain_release = svcauth_unix_domain_release,
        .set_client = svcauth_unix_set_client,
};

static const struct cache_detail ip_map_cache_template = {
        .owner = THIS_MODULE,
        .hash_size = IP_HASHMAX,
        .name = "auth.unix.ip",
        .cache_put = ip_map_put,
        .cache_request = ip_map_request,
        .cache_parse = ip_map_parse,
        .cache_show = ip_map_show,
        .match = ip_map_match,
        .init = ip_map_init,
        .update = update,
        .alloc = ip_map_alloc,
};

int ip_map_cache_create(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd;
        int err;

        cd = cache_create_net(&ip_map_cache_template, net);
        if (IS_ERR(cd))
                return PTR_ERR(cd);
        err = cache_register_net(cd, net);
        if (err) {
                cache_destroy_net(cd, net);
                return err;
        }
        sn->ip_map_cache = cd;
        return 0;
}

void ip_map_cache_destroy(struct net *net)
{
        struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
        struct cache_detail *cd = sn->ip_map_cache;

        sn->ip_map_cache = NULL;
        cache_purge(cd);
        cache_unregister_net(cd, net);
        cache_destroy_net(cd, net);
}