// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#include <linux/user_namespace.h>
#define RPCDBG_FACILITY	RPCDBG_AUTH


#include "netns.h"

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2).  i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */


struct unix_domain {
	struct auth_domain	h;
	/* other stuff later */
};

extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;
extern struct auth_ops svcauth_tls;

static void svcauth_unix_domain_release_rcu(struct rcu_head *head)
{
	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
	struct unix_domain *ud = container_of(dom, struct unix_domain, h);

	kfree(dom->name);
	kfree(ud);
}

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu);
}

struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_find(name);
	while(1) {
		if (rv) {
			if (new && rv != &new->h)
				svcauth_unix_domain_release(&new->h);

			if (rv->flavour != &svcauth_unix) {
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		rv = auth_domain_lookup(name, &new->h);
	}
}
EXPORT_SYMBOL_GPL(unix_domain_find);


/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
#define	IP_HASHBITS	8
#define	IP_HASHMAX	(1<<IP_HASHBITS)

struct ip_map {
	struct cache_head	h;
	char			m_class[8]; /* e.g. "nfsd" */
	struct in6_addr		m_addr;
	struct unix_domain	*m_client;
	struct rcu_head		m_rcu;
};
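/*
 * User space manages this cache through the usual sunrpc cache channel
 * files (typically /proc/net/rpc/auth.unix.ip/channel).  A sketch of
 * the exchange, with illustrative values only:
 *
 *   upcall (kernel -> mountd):   "nfsd 192.0.2.1\n"
 *   downcall (mountd -> kernel): "nfsd 192.0.2.1 <expiry> [domain]\n"
 *
 * where <expiry> is an absolute time in seconds and a missing domain
 * marks a negative entry; see ip_map_request() and ip_map_parse().
 */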
"nfsd" */ 100 struct in6_addr m_addr; 101 struct unix_domain *m_client; 102 struct rcu_head m_rcu; 103 }; 104 105 static void ip_map_put(struct kref *kref) 106 { 107 struct cache_head *item = container_of(kref, struct cache_head, ref); 108 struct ip_map *im = container_of(item, struct ip_map,h); 109 110 if (test_bit(CACHE_VALID, &item->flags) && 111 !test_bit(CACHE_NEGATIVE, &item->flags)) 112 auth_domain_put(&im->m_client->h); 113 kfree_rcu(im, m_rcu); 114 } 115 116 static inline int hash_ip6(const struct in6_addr *ip) 117 { 118 return hash_32(ipv6_addr_hash(ip), IP_HASHBITS); 119 } 120 static int ip_map_match(struct cache_head *corig, struct cache_head *cnew) 121 { 122 struct ip_map *orig = container_of(corig, struct ip_map, h); 123 struct ip_map *new = container_of(cnew, struct ip_map, h); 124 return strcmp(orig->m_class, new->m_class) == 0 && 125 ipv6_addr_equal(&orig->m_addr, &new->m_addr); 126 } 127 static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) 128 { 129 struct ip_map *new = container_of(cnew, struct ip_map, h); 130 struct ip_map *item = container_of(citem, struct ip_map, h); 131 132 strcpy(new->m_class, item->m_class); 133 new->m_addr = item->m_addr; 134 } 135 static void update(struct cache_head *cnew, struct cache_head *citem) 136 { 137 struct ip_map *new = container_of(cnew, struct ip_map, h); 138 struct ip_map *item = container_of(citem, struct ip_map, h); 139 140 kref_get(&item->m_client->h.ref); 141 new->m_client = item->m_client; 142 } 143 static struct cache_head *ip_map_alloc(void) 144 { 145 struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL); 146 if (i) 147 return &i->h; 148 else 149 return NULL; 150 } 151 152 static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h) 153 { 154 return sunrpc_cache_pipe_upcall(cd, h); 155 } 156 157 static void ip_map_request(struct cache_detail *cd, 158 struct cache_head *h, 159 char **bpp, int *blen) 160 { 161 char text_addr[40]; 162 struct ip_map *im = container_of(h, struct ip_map, h); 163 164 if (ipv6_addr_v4mapped(&(im->m_addr))) { 165 snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]); 166 } else { 167 snprintf(text_addr, 40, "%pI6", &im->m_addr); 168 } 169 qword_add(bpp, blen, im->m_class); 170 qword_add(bpp, blen, text_addr); 171 (*bpp)[-1] = '\n'; 172 } 173 174 static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr); 175 static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time64_t expiry); 176 177 static int ip_map_parse(struct cache_detail *cd, 178 char *mesg, int mlen) 179 { 180 /* class ipaddress [domainname] */ 181 /* should be safe just to use the start of the input buffer 182 * for scratch: */ 183 char *buf = mesg; 184 int len; 185 char class[8]; 186 union { 187 struct sockaddr sa; 188 struct sockaddr_in s4; 189 struct sockaddr_in6 s6; 190 } address; 191 struct sockaddr_in6 sin6; 192 int err; 193 194 struct ip_map *ipmp; 195 struct auth_domain *dom; 196 time64_t expiry; 197 198 if (mesg[mlen-1] != '\n') 199 return -EINVAL; 200 mesg[mlen-1] = 0; 201 202 /* class */ 203 len = qword_get(&mesg, class, sizeof(class)); 204 if (len <= 0) return -EINVAL; 205 206 /* ip address */ 207 len = qword_get(&mesg, buf, mlen); 208 if (len <= 0) return -EINVAL; 209 210 if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0) 211 return -EINVAL; 212 switch (address.sa.sa_family) { 213 case AF_INET: 214 /* Form a mapped IPv4 address in sin6 */ 215 sin6.sin6_family = AF_INET6; 216 
		ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
				       &sin6.sin6_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		memcpy(&sin6, &address.s6, sizeof(sin6));
		break;
#endif
	default:
		return -EINVAL;
	}

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;

	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	/* IPv6 scope IDs are ignored for now */
	ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
	if (ipmp) {
		err = __ip_map_update(cd, ipmp,
				      container_of(dom, struct unix_domain, h),
				      expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}

static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in6_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	addr = im->m_addr;

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	if (ipv6_addr_v4mapped(&addr)) {
		seq_printf(m, "%s %pI4 %s\n",
			   im->m_class, &addr.s6_addr32[3], dom);
	} else {
		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
	}
	return 0;
}


static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
				      struct in6_addr *addr)
{
	struct ip_map ip;
	struct cache_head *ch;

	strcpy(ip.m_class, class);
	ip.m_addr = *addr;
	ch = sunrpc_cache_lookup_rcu(cd, &ip.h,
				     hash_str(class, IP_HASHBITS) ^
				     hash_ip6(addr));

	if (ch)
		return container_of(ch, struct ip_map, h);
	else
		return NULL;
}

static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
			   struct unix_domain *udom, time64_t expiry)
{
	struct ip_map ip;
	struct cache_head *ch;

	ip.m_client = udom;
	ip.h.flags = 0;
	if (!udom)
		set_bit(CACHE_NEGATIVE, &ip.h.flags);
	ip.h.expiry_time = expiry;
	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
				 hash_str(ipm->m_class, IP_HASHBITS) ^
				 hash_ip6(&ipm->m_addr));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, cd);
	return 0;
}

void svcauth_unix_purge(struct net *net)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cache_purge(sn->ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);

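/*
 * The helpers below cache the ip_map that was last resolved for a
 * transport in xpt_auth_cache, so later requests on the same
 * connection can skip the hash lookup; ip_map_cached_get() drops the
 * cached entry again once the cache code reports it as expired.
 */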
static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
	struct ip_map *ipm = NULL;
	struct sunrpc_net *sn;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		ipm = xprt->xpt_auth_cache;
		if (ipm != NULL) {
			sn = net_generic(xprt->xpt_net, sunrpc_net_id);
			if (cache_is_expired(sn->ip_map_cache, &ipm->h)) {
				/*
				 * The entry has been invalidated since it was
				 * remembered, e.g. by a second mount from the
				 * same IP address.
				 */
				xprt->xpt_auth_cache = NULL;
				spin_unlock(&xprt->xpt_lock);
				cache_put(&ipm->h, sn->ip_map_cache);
				return NULL;
			}
			cache_get(&ipm->h);
		}
		spin_unlock(&xprt->xpt_lock);
	}
	return ipm;
}

static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		if (xprt->xpt_auth_cache == NULL) {
			/* newly cached, keep the reference */
			xprt->xpt_auth_cache = ipm;
			ipm = NULL;
		}
		spin_unlock(&xprt->xpt_lock);
	}
	if (ipm) {
		struct sunrpc_net *sn;

		sn = net_generic(xprt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
	struct ip_map *ipm;

	ipm = xpt->xpt_auth_cache;
	if (ipm != NULL) {
		struct sunrpc_net *sn;

		sn = net_generic(xpt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS has a max of UNX_NGROUPS
 */
#define	GID_HASHBITS	8
#define	GID_HASHMAX	(1<<GID_HASHBITS)

struct unix_gid {
	struct cache_head	h;
	kuid_t			uid;
	struct group_info	*gi;
	struct rcu_head		rcu;
};

static int unix_gid_hash(kuid_t uid)
{
	return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
}

static void unix_gid_free(struct rcu_head *rcu)
{
	struct unix_gid *ug = container_of(rcu, struct unix_gid, rcu);
	struct cache_head *item = &ug->h;

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		put_group_info(ug->gi);
	kfree(ug);
}

static void unix_gid_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct unix_gid *ug = container_of(item, struct unix_gid, h);

	call_rcu(&ug->rcu, unix_gid_free);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	return uid_eq(orig->uid, new->uid);
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);
	new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);

	get_group_info(item->gi);
	new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
	if (g)
		return &g->h;
	else
		return NULL;
}

static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall_timeout(cd, h);
}

static void unix_gid_request(struct cache_detail *cd,
			     struct cache_head *h,
			     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid));
	qword_add(bpp, blen, tuid);
	(*bpp)[-1] = '\n';
}
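/*
 * Exchange over the auth.unix.gid channel, in the same style as the
 * auth.unix.ip cache above (the numbers are illustrative only):
 *
 *   upcall (kernel -> user space):   "1000\n"
 *   downcall (user space -> kernel): "1000 <expiry> 3 100 1001 1002\n"
 *
 * i.e. "uid expiry Ngid gid0 gid1 ... gidN-1", as parsed by
 * unix_gid_parse() below.
 */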
static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);

static int unix_gid_parse(struct cache_detail *cd,
			  char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int id;
	kuid_t uid;
	int gids;
	int rv;
	int i;
	int err;
	time64_t expiry;
	struct unix_gid ug, *ugp;

	if (mesg[mlen - 1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &id);
	if (rv)
		return -EINVAL;
	uid = make_kuid(current_user_ns(), id);
	ug.uid = uid;

	expiry = get_expiry(&mesg);
	if (expiry == 0)
		return -EINVAL;

	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0 ; i < gids ; i++) {
		int gid;
		kgid_t kgid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		kgid = make_kgid(current_user_ns(), gid);
		if (!gid_valid(kgid))
			goto out;
		ug.gi->gid[i] = kgid;
	}

	groups_sort(ug.gi);
	ugp = unix_gid_lookup(cd, uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		ch = sunrpc_cache_update(cd,
					 &ug.h, &ugp->h,
					 unix_gid_hash(uid));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, cd);
		}
	} else
		err = -ENOMEM;
out:
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}

static int unix_gid_show(struct seq_file *m,
			 struct cache_detail *cd,
			 struct cache_head *h)
{
	struct user_namespace *user_ns = m->file->f_cred->user_ns;
	struct unix_gid *ug;
	int i;
	int glen;

	if (h == NULL) {
		seq_puts(m, "#uid cnt: gids...\n");
		return 0;
	}
	ug = container_of(h, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		glen = ug->gi->ngroups;
	else
		glen = 0;

	seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
	for (i = 0; i < glen; i++)
		seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i]));
	seq_printf(m, "\n");
	return 0;
}

static const struct cache_detail unix_gid_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= GID_HASHMAX,
	.name		= "auth.unix.gid",
	.cache_put	= unix_gid_put,
	.cache_upcall	= unix_gid_upcall,
	.cache_request	= unix_gid_request,
	.cache_parse	= unix_gid_parse,
	.cache_show	= unix_gid_show,
	.match		= unix_gid_match,
	.init		= unix_gid_init,
	.update		= unix_gid_update,
	.alloc		= unix_gid_alloc,
};

int unix_gid_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&unix_gid_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->unix_gid_cache = cd;
	return 0;
}

void unix_gid_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->unix_gid_cache;

	sn->unix_gid_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

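/*
 * Both caches are created and registered per network namespace (see
 * unix_gid_cache_create() above and ip_map_cache_create() below), so
 * each namespace gets its own channel files, typically under
 * /proc/net/rpc/auth.unix.gid and /proc/net/rpc/auth.unix.ip.
 */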
static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
{
	struct unix_gid ug;
	struct cache_head *ch;

	ug.uid = uid;
	ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid));
	if (ch)
		return container_of(ch, struct unix_gid, h);
	else
		return NULL;
}

static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
{
	struct unix_gid *ug;
	struct group_info *gi;
	int ret;
	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net,
					    sunrpc_net_id);

	ug = unix_gid_lookup(sn->unix_gid_cache, uid);
	if (!ug)
		return ERR_PTR(-EAGAIN);
	ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle);
	switch (ret) {
	case -ENOENT:
		return ERR_PTR(-ENOENT);
	case -ETIMEDOUT:
		return ERR_PTR(-ESHUTDOWN);
	case 0:
		gi = get_group_info(ug->gi);
		cache_put(&ug->h, sn->unix_gid_cache);
		return gi;
	default:
		return ERR_PTR(-EAGAIN);
	}
}

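/*
 * svcauth_unix_set_client() below is shared by the null, tls and unix
 * flavours: it maps the source address to an auth_domain through the
 * auth.unix.ip cache and replaces the request's supplementary groups
 * with the auth.unix.gid result when one exists.  A pending upcall
 * turns into SVC_DROP, a timed-out one into SVC_CLOSE, and a negative
 * entry into SVC_DENIED.
 */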
int
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sin6_storage;
	struct ip_map *ipm;
	struct group_info *gi;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct net *net = xprt->xpt_net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	switch (rqstp->rq_addr.ss_family) {
	case AF_INET:
		sin = svc_addr_in(rqstp);
		sin6 = &sin6_storage;
		ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
		break;
	case AF_INET6:
		sin6 = svc_addr_in6(rqstp);
		break;
	default:
		BUG();
	}

	rqstp->rq_client = NULL;
	if (rqstp->rq_proc == 0)
		goto out;

	rqstp->rq_auth_stat = rpc_autherr_badcred;
	ipm = ip_map_cached_get(xprt);
	if (ipm == NULL)
		ipm = __ip_map_lookup(sn->ip_map_cache, rqstp->rq_server->sv_program->pg_class,
				      &sin6->sin6_addr);

	if (ipm == NULL)
		return SVC_DENIED;

	switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
	default:
		BUG();
	case -ETIMEDOUT:
		return SVC_CLOSE;
	case -EAGAIN:
		return SVC_DROP;
	case -ENOENT:
		return SVC_DENIED;
	case 0:
		rqstp->rq_client = &ipm->m_client->h;
		kref_get(&rqstp->rq_client->ref);
		ip_map_cached_put(xprt, ipm);
		break;
	}

	gi = unix_gid_find(cred->cr_uid, rqstp);
	switch (PTR_ERR(gi)) {
	case -EAGAIN:
		return SVC_DROP;
	case -ESHUTDOWN:
		return SVC_CLOSE;
	case -ENOENT:
		break;
	default:
		put_group_info(cred->cr_group_info);
		cred->cr_group_info = gi;
	}

out:
	rqstp->rq_auth_stat = rpc_auth_ok;
	return SVC_OK;
}
EXPORT_SYMBOL_GPL(svcauth_unix_set_client);

/**
 * svcauth_null_accept - Decode and validate incoming RPC_AUTH_NULL credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *	%SVC_OK: Both credential and verifier are valid
 *	%SVC_DENIED: Credential or verifier is not valid
 *	%SVC_GARBAGE: Failed to decode credential or verifier
 *	%SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static int
svcauth_null_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred *cred = &rqstp->rq_cred;
	u32 flavor, len;
	void *body;

	/* Length of Call's credential body field: */
	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = INVALID_UID;
	cred->cr_gid = INVALID_GID;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE; /* kmalloc failure - client must retry */

	if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
					  RPC_AUTH_NULL, NULL, 0) < 0)
		return SVC_CLOSE;
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
	return SVC_OK;
}

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0; /* don't drop */
}


struct auth_ops svcauth_null = {
	.name		= "null",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_NULL,
	.accept		= svcauth_null_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};

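/*
 * AUTH_TLS: a client probes for RPC-over-TLS support (RFC 9289) by
 * sending a NULL procedure call that carries an AUTH_TLS credential.
 * If the transport can be upgraded, the reply verifier built below
 * carries the 8-byte body "STARTTLS"; otherwise a plain AUTH_NULL
 * verifier is returned and the client knows TLS is unavailable.
 */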
/**
 * svcauth_tls_accept - Decode and validate incoming RPC_AUTH_TLS credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *	%SVC_OK: Both credential and verifier are valid
 *	%SVC_DENIED: Credential or verifier is not valid
 *	%SVC_GARBAGE: Failed to decode credential or verifier
 *	%SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static int
svcauth_tls_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred *cred = &rqstp->rq_cred;
	u32 flavor, len;
	void *body;
	__be32 *p;

	/* Length of Call's credential body field: */
	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* AUTH_TLS is not valid on non-NULL procedures */
	if (rqstp->rq_proc != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = INVALID_UID;
	cred->cr_gid = INVALID_GID;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;

	if (rqstp->rq_xprt->xpt_ops->xpo_start_tls) {
		p = xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2 + 8);
		if (!p)
			return SVC_CLOSE;
		*p++ = rpc_auth_null;
		*p++ = cpu_to_be32(8);
		memcpy(p, "STARTTLS", 8);
	} else {
		if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
						  RPC_AUTH_NULL, NULL, 0) < 0)
			return SVC_CLOSE;
	}
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_TLS;
	return SVC_OK;
}

struct auth_ops svcauth_tls = {
	.name		= "tls",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_TLS,
	.accept		= svcauth_tls_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};


/**
 * svcauth_unix_accept - Decode and validate incoming RPC_AUTH_SYS credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *	%SVC_OK: Both credential and verifier are valid
 *	%SVC_DENIED: Credential or verifier is not valid
 *	%SVC_GARBAGE: Failed to decode credential or verifier
 *	%SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static int
svcauth_unix_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct user_namespace *userns;
	u32 flavor, len, i;
	void *body;
	__be32 *p;

	/*
	 * This implementation ignores the length of the Call's
	 * credential body field and the timestamp and machinename
	 * fields.
	 */
	p = xdr_inline_decode(xdr, XDR_UNIT * 3);
	if (!p)
		return SVC_GARBAGE;
	len = be32_to_cpup(p + 2);
	if (len > RPC_MAX_MACHINENAME)
		return SVC_GARBAGE;
	if (!xdr_inline_decode(xdr, len))
		return SVC_GARBAGE;

	/*
	 * Note: we skip uid_valid()/gid_valid() checks here for
	 * backwards compatibility with clients that use -1 id's.
	 * Instead, -1 uid or gid is later mapped to the
	 * (export-specific) anonymous id by nfsd_setuser.
	 * Supplementary gid's will be left alone.
	 */
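	/*
	 * Ids are translated through the user namespace of whoever
	 * created the transport (e.g. an nfsd started inside a user
	 * namespace), falling back to init_user_ns when no such
	 * credential was recorded.
	 */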
	userns = (rqstp->rq_xprt && rqstp->rq_xprt->xpt_cred) ?
		rqstp->rq_xprt->xpt_cred->user_ns : &init_user_ns;
	if (xdr_stream_decode_u32(xdr, &i) < 0)
		return SVC_GARBAGE;
	cred->cr_uid = make_kuid(userns, i);
	if (xdr_stream_decode_u32(xdr, &i) < 0)
		return SVC_GARBAGE;
	cred->cr_gid = make_kgid(userns, i);

	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len > UNX_NGROUPS)
		goto badcred;
	p = xdr_inline_decode(xdr, XDR_UNIT * len);
	if (!p)
		return SVC_GARBAGE;
	cred->cr_group_info = groups_alloc(len);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;
	for (i = 0; i < len; i++) {
		kgid_t kgid = make_kgid(userns, be32_to_cpup(p++));
		cred->cr_group_info->gid[i] = kgid;
	}
	groups_sort(cred->cr_group_info);

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
					  RPC_AUTH_NULL, NULL, 0) < 0)
		return SVC_CLOSE;
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
	return SVC_OK;

badcred:
	rqstp->rq_auth_stat = rpc_autherr_badcred;
	return SVC_DENIED;
}

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
	/* Verifier (such as it is) is already in place.
	 */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0;
}


struct auth_ops svcauth_unix = {
	.name		= "unix",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_UNIX,
	.accept		= svcauth_unix_accept,
	.release	= svcauth_unix_release,
	.domain_release	= svcauth_unix_domain_release,
	.set_client	= svcauth_unix_set_client,
};

static const struct cache_detail ip_map_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= IP_HASHMAX,
	.name		= "auth.unix.ip",
	.cache_put	= ip_map_put,
	.cache_upcall	= ip_map_upcall,
	.cache_request	= ip_map_request,
	.cache_parse	= ip_map_parse,
	.cache_show	= ip_map_show,
	.match		= ip_map_match,
	.init		= ip_map_init,
	.update		= update,
	.alloc		= ip_map_alloc,
};

int ip_map_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&ip_map_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->ip_map_cache = cd;
	return 0;
}

void ip_map_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->ip_map_cache;

	sn->ip_map_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}