// SPDX-License-Identifier: GPL-2.0
/*
 * Neil Brown <neilb@cse.unsw.edu.au>
 * J. Bruce Fields <bfields@umich.edu>
 * Andy Adamson <andros@umich.edu>
 * Dug Song <dugsong@monkey.org>
 *
 * RPCSEC_GSS server authentication.
 * This implements RPCSEC_GSS as defined in rfc2203 (rpcsec_gss) and rfc2078
 * (gssapi)
 *
 * The RPCSEC_GSS involves three stages:
 *  1/ context creation
 *  2/ data exchange
 *  3/ context destruction
 *
 * Context creation is handled largely by upcalls to user-space.
 *  In particular, GSS_Accept_sec_context is handled by an upcall
 * Data exchange is handled entirely within the kernel
 *  In particular, GSS_GetMIC, GSS_VerifyMIC, GSS_Seal, GSS_Unseal are in-kernel.
 * Context destruction is handled in-kernel
 *  GSS_Delete_sec_context is in-kernel
 *
 * Context creation is initiated by a RPCSEC_GSS_INIT request arriving.
 * The context handle and gss_token are used as a key into the rpcsec_init cache.
 * The content of this cache includes some of the outputs of GSS_Accept_sec_context,
 * being major_status, minor_status, context_handle, reply_token.
 * These are sent back to the client.
 * Sequence window management is handled by the kernel.  The window size is
 * currently a compile time constant.
 *
 * When user-space is happy that a context is established, it places an entry
 * in the rpcsec_context cache. The key for this cache is the context_handle.
 * The content includes:
 *  uid/gidlist - for determining access rights
 *  mechanism type
 *  mechanism specific information, such as a key
 *
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/user_namespace.h>

#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_krb5.h>

#include <trace/events/rpcgss.h>

#include "gss_rpc_upcall.h"

/*
 * Unfortunately there isn't a maximum checksum size exported via the
 * GSS API. Manufacture one based on GSS mechanisms supported by this
 * implementation.
 */
#define GSS_MAX_CKSUMSIZE (GSS_KRB5_TOK_HDR_LEN + GSS_KRB5_MAX_CKSUM_LEN)

/*
 * This value may be increased in the future to accommodate other
 * usage of the scratch buffer.
 */
#define GSS_SCRATCH_SIZE GSS_MAX_CKSUMSIZE

/* Per-request GSS state, attached to rqstp->rq_auth_data by this flavour. */
struct gss_svc_data {
	/* decoded gss client cred: */
	struct rpc_gss_wire_cred	clcred;
	/* save a pointer to the beginning of the encoded verifier,
	 * for use in encryption/checksumming in svcauth_gss_release: */
	__be32				*verf_start;
	struct rsc			*rsci;

	/* for temporary results */
	__be32				gsd_seq_num;
	u8				gsd_scratch[GSS_SCRATCH_SIZE];
};

/* The rpcsec_init cache is used for mapping RPCSEC_GSS_{,CONT_}INIT requests
 * into replies.
 *
 * Key is context handle (\x if empty) and gss_token.
 * Content is major_status minor_status (integers) context_handle, reply_token.
 *
 */
/* Byte-for-byte comparison of two netobjs. */
static int netobj_equal(struct xdr_netobj *a, struct xdr_netobj *b)
{
	return a->len == b->len && 0 == memcmp(a->data, b->data, a->len);
}

#define	RSI_HASHBITS	6
#define	RSI_HASHMAX	(1<<RSI_HASHBITS)

/* One entry in the rpcsec_init upcall cache. */
struct rsi {
	struct cache_head	h;
	struct xdr_netobj	in_handle, in_token;
	struct xdr_netobj	out_handle, out_token;
	int			major_status, minor_status;
	struct rcu_head		rcu_head;
};

static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item);

/* Release the netobj payloads owned by @rsii (not @rsii itself). */
static void rsi_free(struct rsi *rsii)
{
	kfree(rsii->in_handle.data);
	kfree(rsii->in_token.data);
	kfree(rsii->out_handle.data);
	kfree(rsii->out_token.data);
}

static void rsi_free_rcu(struct rcu_head *head)
{
	struct rsi *rsii = container_of(head, struct rsi, rcu_head);

	rsi_free(rsii);
	kfree(rsii);
}

/* kref release: defer the actual free past an RCU grace period because
 * lookups traverse this cache under rcu_read_lock(). */
static void rsi_put(struct kref *ref)
{
	struct rsi *rsii = container_of(ref, struct rsi, h.ref);

	call_rcu(&rsii->rcu_head, rsi_free_rcu);
}

static inline int rsi_hash(struct rsi *item)
{
	return hash_mem(item->in_handle.data, item->in_handle.len, RSI_HASHBITS)
		^ hash_mem(item->in_token.data, item->in_token.len, RSI_HASHBITS);
}

static int rsi_match(struct cache_head *a, struct cache_head *b)
{
	struct rsi *item = container_of(a, struct rsi, h);
	struct rsi *tmp = container_of(b, struct rsi, h);
	return netobj_equal(&item->in_handle, &tmp->in_handle) &&
	       netobj_equal(&item->in_token, &tmp->in_token);
}

/* Copy @len bytes at @src into a freshly allocated netobj @dst. */
static int dup_to_netobj(struct xdr_netobj *dst, char *src, int len)
{
	dst->len = len;
	dst->data = (len ? kmemdup(src, len, GFP_KERNEL) : NULL);
	if (len && !dst->data)
		return -ENOMEM;
	return 0;
}

static inline int dup_netobj(struct xdr_netobj *dst, struct xdr_netobj *src)
{
	return dup_to_netobj(dst, src->data, src->len);
}

/* cache_detail->init: steal the lookup key (in_handle/in_token) from
 * @citem into @cnew; @citem is a stack-allocated template. */
static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct rsi *new = container_of(cnew, struct rsi, h);
	struct rsi *item = container_of(citem, struct rsi, h);

	new->out_handle.data = NULL;
	new->out_handle.len = 0;
	new->out_token.data = NULL;
	new->out_token.len = 0;
	new->in_handle.len = item->in_handle.len;
	item->in_handle.len = 0;
	new->in_token.len = item->in_token.len;
	item->in_token.len = 0;
	new->in_handle.data = item->in_handle.data;
	item->in_handle.data = NULL;
	new->in_token.data = item->in_token.data;
	item->in_token.data = NULL;
}

/* cache_detail->update: steal the upcall results from @citem into @cnew. */
static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
{
	struct rsi *new = container_of(cnew, struct rsi, h);
	struct rsi *item = container_of(citem, struct rsi, h);

	BUG_ON(new->out_handle.data || new->out_token.data);
	new->out_handle.len = item->out_handle.len;
	item->out_handle.len = 0;
	new->out_token.len = item->out_token.len;
	item->out_token.len = 0;
	new->out_handle.data = item->out_handle.data;
	item->out_handle.data = NULL;
	new->out_token.data = item->out_token.data;
	item->out_token.data = NULL;

	new->major_status = item->major_status;
	new->minor_status = item->minor_status;
}

static struct cache_head *rsi_alloc(void)
{
	struct rsi *rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);
	if (rsii)
		return &rsii->h;
	else
		return NULL;
}

static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall_timeout(cd, h);
}

/* Format the upcall request written to the cache channel:
 * "<hex in_handle> <hex in_token>\n" */
static void rsi_request(struct cache_detail *cd,
			struct cache_head *h,
			char **bpp, int *blen)
{
	struct rsi *rsii = container_of(h, struct rsi, h);

	qword_addhex(bpp, blen, rsii->in_handle.data, rsii->in_handle.len);
	qword_addhex(bpp, blen, rsii->in_token.data, rsii->in_token.len);
	(*bpp)[-1] = '\n';
	WARN_ONCE(*blen < 0,
		  "RPCSEC/GSS credential too large - please use gssproxy\n");
}

/* Parse user-space's downcall reply and install it in the cache. */
static int rsi_parse(struct cache_detail *cd,
		     char *mesg, int mlen)
{
	/* context token expiry major minor context token */
	char *buf = mesg;
	char *ep;
	int len;
	struct rsi rsii, *rsip = NULL;
	time64_t expiry;
	int status = -EINVAL;

	memset(&rsii, 0, sizeof(rsii));
	/* handle */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.in_handle, buf, len))
		goto out;

	/* token */
	len = qword_get(&mesg, buf, mlen);
	status = -EINVAL;
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.in_token, buf, len))
		goto out;

	rsip = rsi_lookup(cd, &rsii);
	if (!rsip)
		goto out;

	rsii.h.flags = 0;
	/* expiry */
	expiry = get_expiry(&mesg);
	status = -EINVAL;
	if (expiry == 0)
		goto out;

	/* major/minor */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0)
		goto out;
	rsii.major_status = simple_strtoul(buf, &ep, 10);
	if (*ep)
		goto out;
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0)
		goto out;
	rsii.minor_status = simple_strtoul(buf, &ep, 10);
	if (*ep)
		goto out;

	/* out_handle */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.out_handle, buf, len))
		goto out;

	/* out_token */
	len = qword_get(&mesg, buf, mlen);
	status = -EINVAL;
	if (len < 0)
		goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsii.out_token, buf, len))
		goto out;
	rsii.h.expiry_time = expiry;
	rsip = rsi_update(cd, &rsii, rsip);
	status = 0;
out:
	rsi_free(&rsii);
	if (rsip)
		cache_put(&rsip->h, cd);
	else
		status = -ENOMEM;
	return status;
}

static const struct cache_detail rsi_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= RSI_HASHMAX,
	.name		= "auth.rpcsec.init",
	.cache_put	= rsi_put,
	.cache_upcall	= rsi_upcall,
	.cache_request	= rsi_request,
	.cache_parse	= rsi_parse,
	.match		= rsi_match,
	.init		= rsi_init,
	.update		= update_rsi,
	.alloc		= rsi_alloc,
};

static struct rsi *rsi_lookup(struct cache_detail *cd, struct rsi *item)
{
	struct cache_head *ch;
	int hash = rsi_hash(item);

	ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
	if (ch)
		return container_of(ch, struct rsi, h);
	else
		return NULL;
}

static struct rsi *rsi_update(struct cache_detail *cd, struct rsi *new, struct rsi *old)
{
	struct cache_head *ch;
	int hash = rsi_hash(new);

	ch = sunrpc_cache_update(cd, &new->h,
				 &old->h, hash);
	if (ch)
		return container_of(ch, struct rsi, h);
	else
		return NULL;
}


/*
 * The rpcsec_context cache is used to store a context that is
 * used in data exchange.
 * The key is a context handle. The content is:
 *  uid, gidlist, mechanism, service-set, mech-specific-data
 */
#define	RSC_HASHBITS	10
#define	RSC_HASHMAX	(1<<RSC_HASHBITS)

#define GSS_SEQ_WIN	128

/* Per-context replay-detection window (RFC 2203, Section 5.3.3.1). */
struct gss_svc_seq_data {
	/* highest seq number seen so far: */
	u32			sd_max;
	/* for i such that sd_max-GSS_SEQ_WIN < i <= sd_max, the i-th bit of
	 * sd_win is nonzero iff sequence number i has been seen already: */
	unsigned long		sd_win[GSS_SEQ_WIN/BITS_PER_LONG];
	spinlock_t		sd_lock;
};

/* One established GSS context in the rpcsec_context cache. */
struct rsc {
	struct cache_head	h;
	struct xdr_netobj	handle;
	struct svc_cred		cred;
	struct gss_svc_seq_data	seqdata;
	struct gss_ctx		*mechctx;
	struct rcu_head		rcu_head;
};

static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item);

/* Release everything owned by @rsci (not @rsci itself). */
static void rsc_free(struct rsc *rsci)
{
	kfree(rsci->handle.data);
	if (rsci->mechctx)
		gss_delete_sec_context(&rsci->mechctx);
	free_svc_cred(&rsci->cred);
}

static void rsc_free_rcu(struct rcu_head *head)
{
	struct rsc *rsci = container_of(head, struct rsc, rcu_head);

	kfree(rsci->handle.data);
	kfree(rsci);
}

/* kref release: tear down the security context and creds immediately;
 * the handle and entry are freed after an RCU grace period. */
static void rsc_put(struct kref *ref)
{
	struct rsc *rsci = container_of(ref, struct rsc, h.ref);

	if (rsci->mechctx)
		gss_delete_sec_context(&rsci->mechctx);
	free_svc_cred(&rsci->cred);
	call_rcu(&rsci->rcu_head, rsc_free_rcu);
}

static inline int
rsc_hash(struct rsc *rsci)
{
	return hash_mem(rsci->handle.data, rsci->handle.len, RSC_HASHBITS);
}

static int
rsc_match(struct cache_head *a, struct cache_head *b)
{
	struct rsc *new = container_of(a, struct rsc, h);
	struct rsc *tmp = container_of(b, struct rsc, h);

	return netobj_equal(&new->handle, &tmp->handle);
}

/* cache_detail->init: steal the handle from the template @ctmp. */
static void
rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
{
	struct rsc *new = container_of(cnew, struct rsc, h);
	struct rsc *tmp = container_of(ctmp, struct rsc, h);

	new->handle.len = tmp->handle.len;
	tmp->handle.len = 0;
	new->handle.data = tmp->handle.data;
	tmp->handle.data = NULL;
	new->mechctx = NULL;
	init_svc_cred(&new->cred);
}

/* cache_detail->update: steal mech context and creds from @ctmp, and
 * reset the sequence window for the (re-)established context. */
static void
update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
{
	struct rsc *new = container_of(cnew, struct rsc, h);
	struct rsc *tmp = container_of(ctmp, struct rsc, h);

	new->mechctx = tmp->mechctx;
	tmp->mechctx = NULL;
	memset(&new->seqdata, 0, sizeof(new->seqdata));
	spin_lock_init(&new->seqdata.sd_lock);
	new->cred = tmp->cred;
	init_svc_cred(&tmp->cred);
}

static struct cache_head *
rsc_alloc(void)
{
	struct rsc *rsci = kmalloc(sizeof(*rsci), GFP_KERNEL);
	if (rsci)
		return &rsci->h;
	else
		return NULL;
}

/* Contexts are only ever installed via downcall, never requested. */
static int rsc_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return -EINVAL;
}

/* Parse a context downcall from user-space and install it. */
static int rsc_parse(struct cache_detail *cd,
		     char *mesg, int mlen)
{
	/* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
	char *buf = mesg;
	int id;
	int len, rv;
	struct rsc rsci, *rscp = NULL;
	time64_t expiry;
	int status = -EINVAL;
	struct gss_api_mech *gm = NULL;

	memset(&rsci, 0, sizeof(rsci));
	/* context handle */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) goto out;
	status = -ENOMEM;
	if (dup_to_netobj(&rsci.handle, buf, len))
		goto out;

	rsci.h.flags = 0;
	/* expiry */
	expiry = get_expiry(&mesg);
	status = -EINVAL;
	if (expiry == 0)
		goto out;

	rscp = rsc_lookup(cd, &rsci);
	if (!rscp)
		goto out;

	/* uid, or NEGATIVE */
	rv = get_int(&mesg, &id);
	if (rv == -EINVAL)
		goto out;
	if (rv == -ENOENT)
		set_bit(CACHE_NEGATIVE, &rsci.h.flags);
	else {
		int N, i;

		/*
		 * NOTE: we skip uid_valid()/gid_valid() checks here:
		 * instead, -1 id's are later mapped to the
		 * (export-specific) anonymous id by nfsd_setuser.
		 *
		 * (But supplementary gid's get no such special
		 * treatment so are checked for validity here.)
		 */
		/* uid */
		rsci.cred.cr_uid = make_kuid(current_user_ns(), id);

		/* gid */
		if (get_int(&mesg, &id))
			goto out;
		rsci.cred.cr_gid = make_kgid(current_user_ns(), id);

		/* number of additional gid's */
		if (get_int(&mesg, &N))
			goto out;
		if (N < 0 || N > NGROUPS_MAX)
			goto out;
		status = -ENOMEM;
		rsci.cred.cr_group_info = groups_alloc(N);
		if (rsci.cred.cr_group_info == NULL)
			goto out;

		/* gid's */
		status = -EINVAL;
		for (i = 0; i < N; i++) {
			kgid_t kgid;
			if (get_int(&mesg, &id))
				goto out;
			kgid = make_kgid(current_user_ns(), id);
			if (!gid_valid(kgid))
				goto out;
			rsci.cred.cr_group_info->gid[i] = kgid;
		}
		groups_sort(rsci.cred.cr_group_info);

		/* mech name */
		len = qword_get(&mesg, buf, mlen);
		if (len < 0)
			goto out;
		gm = rsci.cred.cr_gss_mech = gss_mech_get_by_name(buf);
		status = -EOPNOTSUPP;
		if (!gm)
			goto out;

		status = -EINVAL;
		/* mech-specific data: */
		len = qword_get(&mesg, buf, mlen);
		if (len < 0)
			goto out;
		status = gss_import_sec_context(buf, len, gm, &rsci.mechctx,
						NULL, GFP_KERNEL);
		if (status)
			goto out;

		/* get client name */
		len = qword_get(&mesg, buf, mlen);
		if (len > 0) {
			rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
			if (!rsci.cred.cr_principal) {
				status = -ENOMEM;
				goto out;
			}
		}

	}
	rsci.h.expiry_time = expiry;
	rscp = rsc_update(cd, &rsci, rscp);
	status = 0;
out:
	rsc_free(&rsci);
	if (rscp)
		cache_put(&rscp->h, cd);
	else
		status = -ENOMEM;
	return status;
}

static const struct cache_detail rsc_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= RSC_HASHMAX,
	.name		= "auth.rpcsec.context",
	.cache_put	= rsc_put,
	.cache_upcall	= rsc_upcall,
	.cache_parse	= rsc_parse,
	.match		= rsc_match,
	.init		= rsc_init,
	.update		= update_rsc,
	.alloc		= rsc_alloc,
};

static struct rsc *rsc_lookup(struct cache_detail *cd, struct rsc *item)
{
	struct cache_head *ch;
	int hash = rsc_hash(item);

	ch = sunrpc_cache_lookup_rcu(cd, &item->h, hash);
	if (ch)
		return container_of(ch, struct rsc, h);
	else
		return NULL;
}

static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old)
{
	struct cache_head *ch;
	int hash = rsc_hash(new);

	ch = sunrpc_cache_update(cd, &new->h,
				 &old->h, hash);
	if (ch)
		return container_of(ch, struct rsc, h);
	else
		return NULL;
}


/* Look up a context by handle, returning a referenced entry on success.
 * Returns NULL if the entry is absent, expired, or not yet valid. */
static struct rsc *
gss_svc_searchbyctx(struct cache_detail *cd, struct xdr_netobj *handle)
{
	struct rsc rsci;
	struct rsc *found;

	memset(&rsci, 0, sizeof(rsci));
	if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
		return NULL;
	found = rsc_lookup(cd, &rsci);
	rsc_free(&rsci);
	if (!found)
		return NULL;
	if (cache_check(cd, &found->h, NULL))
		return NULL;
	return found;
}

/**
 * gss_check_seq_num - GSS sequence number window check
 * @rqstp: RPC Call to use when reporting errors
 * @rsci: cached GSS context state (updated on return)
 * @seq_num: sequence number to check
 *
 * Implements sequence number algorithm as specified in
 * RFC 2203, Section 5.3.3.1. "Context Management".
 *
 * Return values:
 *   %true: @rqstp's GSS sequence number is inside the window
 *   %false: @rqstp's GSS sequence number is outside the window
 */
static bool gss_check_seq_num(const struct svc_rqst *rqstp, struct rsc *rsci,
			      u32 seq_num)
{
	struct gss_svc_seq_data *sd = &rsci->seqdata;
	bool result = false;

	spin_lock(&sd->sd_lock);
	if (seq_num > sd->sd_max) {
		if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
			/* Window jumped forward entirely: reset it. */
			memset(sd->sd_win, 0, sizeof(sd->sd_win));
			sd->sd_max = seq_num;
		} else while (sd->sd_max < seq_num) {
			/* Slide the window forward one bit at a time. */
			sd->sd_max++;
			__clear_bit(sd->sd_max % GSS_SEQ_WIN, sd->sd_win);
		}
		__set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win);
		goto ok;
	} else if (seq_num + GSS_SEQ_WIN <= sd->sd_max) {
		/* Below the bottom of the window. */
		goto toolow;
	}
	/* Inside the window: reject replays. */
	if (__test_and_set_bit(seq_num % GSS_SEQ_WIN, sd->sd_win))
		goto alreadyseen;

ok:
	result = true;
out:
	spin_unlock(&sd->sd_lock);
	return result;

toolow:
	trace_rpcgss_svc_seqno_low(rqstp, seq_num,
				   sd->sd_max - GSS_SEQ_WIN,
				   sd->sd_max);
	goto out;
alreadyseen:
	trace_rpcgss_svc_seqno_seen(rqstp, seq_num);
	goto out;
}

/*
 * Decode and verify a Call's verifier field. For RPC_AUTH_GSS Calls,
 * the body of this field contains a variable length checksum.
 *
 * GSS-specific auth_stat values are mandated by RFC 2203 Section
 * 5.3.3.3.
 */
static int
svcauth_gss_verify_header(struct svc_rqst *rqstp, struct rsc *rsci,
			  __be32 *rpcstart, struct rpc_gss_wire_cred *gc)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct gss_ctx *ctx_id = rsci->mechctx;
	u32 flavor, maj_stat;
	struct xdr_buf rpchdr;
	struct xdr_netobj checksum;
	struct kvec iov;

	/*
	 * Compute the checksum of the incoming Call from the
	 * XID field to credential field:
	 */
	iov.iov_base = rpcstart;
	iov.iov_len = (u8 *)xdr->p - (u8 *)rpcstart;
	xdr_buf_from_iov(&iov, &rpchdr);

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor,
					  (void **)&checksum.data,
					  &checksum.len) < 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}
	if (flavor != RPC_AUTH_GSS) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* A deferred request was already verified on its first pass. */
	if (rqstp->rq_deferred)
		return SVC_OK;
	maj_stat = gss_verify_mic(ctx_id, &rpchdr, &checksum);
	if (maj_stat != GSS_S_COMPLETE) {
		trace_rpcgss_svc_mic(rqstp, maj_stat);
		rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
		return SVC_DENIED;
	}

	if (gc->gc_seq > MAXSEQ) {
		trace_rpcgss_svc_seqno_large(rqstp, gc->gc_seq);
		rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
		return SVC_DENIED;
	}
	if (!gss_check_seq_num(rqstp, rsci, gc->gc_seq))
		return SVC_DROP;
	return SVC_OK;
}

/*
 * Construct and encode a Reply's verifier field. The verifier's body
 * field contains a variable-length checksum of the GSS sequence
 * number.
 */
757 */ 758 static bool 759 svcauth_gss_encode_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq) 760 { 761 struct gss_svc_data *gsd = rqstp->rq_auth_data; 762 u32 maj_stat; 763 struct xdr_buf verf_data; 764 struct xdr_netobj checksum; 765 struct kvec iov; 766 767 gsd->gsd_seq_num = cpu_to_be32(seq); 768 iov.iov_base = &gsd->gsd_seq_num; 769 iov.iov_len = XDR_UNIT; 770 xdr_buf_from_iov(&iov, &verf_data); 771 772 checksum.data = gsd->gsd_scratch; 773 maj_stat = gss_get_mic(ctx_id, &verf_data, &checksum); 774 if (maj_stat != GSS_S_COMPLETE) 775 goto bad_mic; 776 777 return xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream, RPC_AUTH_GSS, 778 checksum.data, checksum.len) > 0; 779 780 bad_mic: 781 trace_rpcgss_svc_get_mic(rqstp, maj_stat); 782 return false; 783 } 784 785 struct gss_domain { 786 struct auth_domain h; 787 u32 pseudoflavor; 788 }; 789 790 static struct auth_domain * 791 find_gss_auth_domain(struct gss_ctx *ctx, u32 svc) 792 { 793 char *name; 794 795 name = gss_service_to_auth_domain_name(ctx->mech_type, svc); 796 if (!name) 797 return NULL; 798 return auth_domain_find(name); 799 } 800 801 static struct auth_ops svcauthops_gss; 802 803 u32 svcauth_gss_flavor(struct auth_domain *dom) 804 { 805 struct gss_domain *gd = container_of(dom, struct gss_domain, h); 806 807 return gd->pseudoflavor; 808 } 809 810 EXPORT_SYMBOL_GPL(svcauth_gss_flavor); 811 812 struct auth_domain * 813 svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name) 814 { 815 struct gss_domain *new; 816 struct auth_domain *test; 817 int stat = -ENOMEM; 818 819 new = kmalloc(sizeof(*new), GFP_KERNEL); 820 if (!new) 821 goto out; 822 kref_init(&new->h.ref); 823 new->h.name = kstrdup(name, GFP_KERNEL); 824 if (!new->h.name) 825 goto out_free_dom; 826 new->h.flavour = &svcauthops_gss; 827 new->pseudoflavor = pseudoflavor; 828 829 test = auth_domain_lookup(name, &new->h); 830 if (test != &new->h) { 831 pr_warn("svc: duplicate registration of gss pseudo flavour %s.\n", 832 name); 
833 stat = -EADDRINUSE; 834 auth_domain_put(test); 835 goto out_free_name; 836 } 837 return test; 838 839 out_free_name: 840 kfree(new->h.name); 841 out_free_dom: 842 kfree(new); 843 out: 844 return ERR_PTR(stat); 845 } 846 EXPORT_SYMBOL_GPL(svcauth_gss_register_pseudoflavor); 847 848 /* 849 * RFC 2203, Section 5.3.2.2 850 * 851 * struct rpc_gss_integ_data { 852 * opaque databody_integ<>; 853 * opaque checksum<>; 854 * }; 855 * 856 * struct rpc_gss_data_t { 857 * unsigned int seq_num; 858 * proc_req_arg_t arg; 859 * }; 860 */ 861 static noinline_for_stack int 862 svcauth_gss_unwrap_integ(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx) 863 { 864 struct gss_svc_data *gsd = rqstp->rq_auth_data; 865 struct xdr_stream *xdr = &rqstp->rq_arg_stream; 866 u32 len, offset, seq_num, maj_stat; 867 struct xdr_buf *buf = xdr->buf; 868 struct xdr_buf databody_integ; 869 struct xdr_netobj checksum; 870 871 /* NFS READ normally uses splice to send data in-place. However 872 * the data in cache can change after the reply's MIC is computed 873 * but before the RPC reply is sent. To prevent the client from 874 * rejecting the server-computed MIC in this somewhat rare case, 875 * do not use splice with the GSS integrity service. 876 */ 877 clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags); 878 879 /* Did we already verify the signature on the original pass through? */ 880 if (rqstp->rq_deferred) 881 return 0; 882 883 if (xdr_stream_decode_u32(xdr, &len) < 0) 884 goto unwrap_failed; 885 if (len & 3) 886 goto unwrap_failed; 887 offset = xdr_stream_pos(xdr); 888 if (xdr_buf_subsegment(buf, &databody_integ, offset, len)) 889 goto unwrap_failed; 890 891 /* 892 * The xdr_stream now points to the @seq_num field. The next 893 * XDR data item is the @arg field, which contains the clear 894 * text RPC program payload. The checksum, which follows the 895 * @arg field, is located and decoded without updating the 896 * xdr_stream. 
897 */ 898 899 offset += len; 900 if (xdr_decode_word(buf, offset, &checksum.len)) 901 goto unwrap_failed; 902 if (checksum.len > sizeof(gsd->gsd_scratch)) 903 goto unwrap_failed; 904 checksum.data = gsd->gsd_scratch; 905 if (read_bytes_from_xdr_buf(buf, offset + XDR_UNIT, checksum.data, 906 checksum.len)) 907 goto unwrap_failed; 908 909 maj_stat = gss_verify_mic(ctx, &databody_integ, &checksum); 910 if (maj_stat != GSS_S_COMPLETE) 911 goto bad_mic; 912 913 /* The received seqno is protected by the checksum. */ 914 if (xdr_stream_decode_u32(xdr, &seq_num) < 0) 915 goto unwrap_failed; 916 if (seq_num != seq) 917 goto bad_seqno; 918 919 xdr_truncate_decode(xdr, XDR_UNIT + checksum.len); 920 return 0; 921 922 unwrap_failed: 923 trace_rpcgss_svc_unwrap_failed(rqstp); 924 return -EINVAL; 925 bad_seqno: 926 trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num); 927 return -EINVAL; 928 bad_mic: 929 trace_rpcgss_svc_mic(rqstp, maj_stat); 930 return -EINVAL; 931 } 932 933 /* 934 * RFC 2203, Section 5.3.2.3 935 * 936 * struct rpc_gss_priv_data { 937 * opaque databody_priv<> 938 * }; 939 * 940 * struct rpc_gss_data_t { 941 * unsigned int seq_num; 942 * proc_req_arg_t arg; 943 * }; 944 */ 945 static noinline_for_stack int 946 svcauth_gss_unwrap_priv(struct svc_rqst *rqstp, u32 seq, struct gss_ctx *ctx) 947 { 948 struct xdr_stream *xdr = &rqstp->rq_arg_stream; 949 u32 len, maj_stat, seq_num, offset; 950 struct xdr_buf *buf = xdr->buf; 951 unsigned int saved_len; 952 953 clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags); 954 955 if (xdr_stream_decode_u32(xdr, &len) < 0) 956 goto unwrap_failed; 957 if (rqstp->rq_deferred) { 958 /* Already decrypted last time through! 
The sequence number 959 * check at out_seq is unnecessary but harmless: */ 960 goto out_seq; 961 } 962 if (len > xdr_stream_remaining(xdr)) 963 goto unwrap_failed; 964 offset = xdr_stream_pos(xdr); 965 966 saved_len = buf->len; 967 maj_stat = gss_unwrap(ctx, offset, offset + len, buf); 968 if (maj_stat != GSS_S_COMPLETE) 969 goto bad_unwrap; 970 xdr->nwords -= XDR_QUADLEN(saved_len - buf->len); 971 972 out_seq: 973 /* gss_unwrap() decrypted the sequence number. */ 974 if (xdr_stream_decode_u32(xdr, &seq_num) < 0) 975 goto unwrap_failed; 976 if (seq_num != seq) 977 goto bad_seqno; 978 return 0; 979 980 unwrap_failed: 981 trace_rpcgss_svc_unwrap_failed(rqstp); 982 return -EINVAL; 983 bad_seqno: 984 trace_rpcgss_svc_seqno_bad(rqstp, seq, seq_num); 985 return -EINVAL; 986 bad_unwrap: 987 trace_rpcgss_svc_unwrap(rqstp, maj_stat); 988 return -EINVAL; 989 } 990 991 static int 992 svcauth_gss_set_client(struct svc_rqst *rqstp) 993 { 994 struct gss_svc_data *svcdata = rqstp->rq_auth_data; 995 struct rsc *rsci = svcdata->rsci; 996 struct rpc_gss_wire_cred *gc = &svcdata->clcred; 997 int stat; 998 999 rqstp->rq_auth_stat = rpc_autherr_badcred; 1000 1001 /* 1002 * A gss export can be specified either by: 1003 * export *(sec=krb5,rw) 1004 * or by 1005 * export gss/krb5(rw) 1006 * The latter is deprecated; but for backwards compatibility reasons 1007 * the nfsd code will still fall back on trying it if the former 1008 * doesn't work; so we try to make both available to nfsd, below. 
1009 */ 1010 rqstp->rq_gssclient = find_gss_auth_domain(rsci->mechctx, gc->gc_svc); 1011 if (rqstp->rq_gssclient == NULL) 1012 return SVC_DENIED; 1013 stat = svcauth_unix_set_client(rqstp); 1014 if (stat == SVC_DROP || stat == SVC_CLOSE) 1015 return stat; 1016 1017 rqstp->rq_auth_stat = rpc_auth_ok; 1018 return SVC_OK; 1019 } 1020 1021 static bool 1022 svcauth_gss_proc_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, 1023 struct xdr_netobj *out_handle, int *major_status, 1024 u32 seq_num) 1025 { 1026 struct xdr_stream *xdr = &rqstp->rq_res_stream; 1027 struct rsc *rsci; 1028 bool rc; 1029 1030 if (*major_status != GSS_S_COMPLETE) 1031 goto null_verifier; 1032 rsci = gss_svc_searchbyctx(cd, out_handle); 1033 if (rsci == NULL) { 1034 *major_status = GSS_S_NO_CONTEXT; 1035 goto null_verifier; 1036 } 1037 1038 rc = svcauth_gss_encode_verf(rqstp, rsci->mechctx, seq_num); 1039 cache_put(&rsci->h, cd); 1040 return rc; 1041 1042 null_verifier: 1043 return xdr_stream_encode_opaque_auth(xdr, RPC_AUTH_NULL, NULL, 0) > 0; 1044 } 1045 1046 static void gss_free_in_token_pages(struct gssp_in_token *in_token) 1047 { 1048 u32 inlen; 1049 int i; 1050 1051 i = 0; 1052 inlen = in_token->page_len; 1053 while (inlen) { 1054 if (in_token->pages[i]) 1055 put_page(in_token->pages[i]); 1056 inlen -= inlen > PAGE_SIZE ? 
PAGE_SIZE : inlen; 1057 } 1058 1059 kfree(in_token->pages); 1060 in_token->pages = NULL; 1061 } 1062 1063 static int gss_read_proxy_verf(struct svc_rqst *rqstp, 1064 struct rpc_gss_wire_cred *gc, 1065 struct xdr_netobj *in_handle, 1066 struct gssp_in_token *in_token) 1067 { 1068 struct xdr_stream *xdr = &rqstp->rq_arg_stream; 1069 unsigned int length, pgto_offs, pgfrom_offs; 1070 int pages, i, pgto, pgfrom; 1071 size_t to_offs, from_offs; 1072 u32 inlen; 1073 1074 if (dup_netobj(in_handle, &gc->gc_ctx)) 1075 return SVC_CLOSE; 1076 1077 /* 1078 * RFC 2203 Section 5.2.2 1079 * 1080 * struct rpc_gss_init_arg { 1081 * opaque gss_token<>; 1082 * }; 1083 */ 1084 if (xdr_stream_decode_u32(xdr, &inlen) < 0) 1085 goto out_denied_free; 1086 if (inlen > xdr_stream_remaining(xdr)) 1087 goto out_denied_free; 1088 1089 pages = DIV_ROUND_UP(inlen, PAGE_SIZE); 1090 in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL); 1091 if (!in_token->pages) 1092 goto out_denied_free; 1093 in_token->page_base = 0; 1094 in_token->page_len = inlen; 1095 for (i = 0; i < pages; i++) { 1096 in_token->pages[i] = alloc_page(GFP_KERNEL); 1097 if (!in_token->pages[i]) { 1098 gss_free_in_token_pages(in_token); 1099 goto out_denied_free; 1100 } 1101 } 1102 1103 length = min_t(unsigned int, inlen, (char *)xdr->end - (char *)xdr->p); 1104 memcpy(page_address(in_token->pages[0]), xdr->p, length); 1105 inlen -= length; 1106 1107 to_offs = length; 1108 from_offs = rqstp->rq_arg.page_base; 1109 while (inlen) { 1110 pgto = to_offs >> PAGE_SHIFT; 1111 pgfrom = from_offs >> PAGE_SHIFT; 1112 pgto_offs = to_offs & ~PAGE_MASK; 1113 pgfrom_offs = from_offs & ~PAGE_MASK; 1114 1115 length = min_t(unsigned int, inlen, 1116 min_t(unsigned int, PAGE_SIZE - pgto_offs, 1117 PAGE_SIZE - pgfrom_offs)); 1118 memcpy(page_address(in_token->pages[pgto]) + pgto_offs, 1119 page_address(rqstp->rq_arg.pages[pgfrom]) + pgfrom_offs, 1120 length); 1121 1122 to_offs += length; 1123 from_offs += length; 1124 inlen -= length; 
1125 } 1126 return 0; 1127 1128 out_denied_free: 1129 kfree(in_handle->data); 1130 return SVC_DENIED; 1131 } 1132 1133 /* 1134 * RFC 2203, Section 5.2.3.1. 1135 * 1136 * struct rpc_gss_init_res { 1137 * opaque handle<>; 1138 * unsigned int gss_major; 1139 * unsigned int gss_minor; 1140 * unsigned int seq_window; 1141 * opaque gss_token<>; 1142 * }; 1143 */ 1144 static bool 1145 svcxdr_encode_gss_init_res(struct xdr_stream *xdr, 1146 struct xdr_netobj *handle, 1147 struct xdr_netobj *gss_token, 1148 unsigned int major_status, 1149 unsigned int minor_status, u32 seq_num) 1150 { 1151 if (xdr_stream_encode_opaque(xdr, handle->data, handle->len) < 0) 1152 return false; 1153 if (xdr_stream_encode_u32(xdr, major_status) < 0) 1154 return false; 1155 if (xdr_stream_encode_u32(xdr, minor_status) < 0) 1156 return false; 1157 if (xdr_stream_encode_u32(xdr, seq_num) < 0) 1158 return false; 1159 if (xdr_stream_encode_opaque(xdr, gss_token->data, gss_token->len) < 0) 1160 return false; 1161 return true; 1162 } 1163 1164 /* 1165 * Having read the cred already and found we're in the context 1166 * initiation case, read the verifier and initiate (or check the results 1167 * of) upcalls to userspace for help with context initiation. If 1168 * the upcall results are available, write the verifier and result. 1169 * Otherwise, drop the request pending an answer to the upcall. 
static int
svcauth_gss_legacy_init(struct svc_rqst *rqstp,
			struct rpc_gss_wire_cred *gc)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct rsi *rsip, rsikey;
	__be32 *p;
	u32 len;
	int ret;
	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);

	memset(&rsikey, 0, sizeof(rsikey));
	if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
		return SVC_CLOSE;

	/*
	 *  RFC 2203 Section 5.2.2
	 *
	 *	struct rpc_gss_init_arg {
	 *		opaque gss_token<>;
	 *	};
	 */
	if (xdr_stream_decode_u32(xdr, &len) < 0) {
		kfree(rsikey.in_handle.data);
		return SVC_DENIED;
	}
	p = xdr_inline_decode(xdr, len);
	if (!p) {
		kfree(rsikey.in_handle.data);
		return SVC_DENIED;
	}
	rsikey.in_token.data = kmalloc(len, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(rsikey.in_token.data)) {
		kfree(rsikey.in_handle.data);
		return SVC_CLOSE;
	}
	memcpy(rsikey.in_token.data, p, len);
	rsikey.in_token.len = len;

	/* Perform upcall, or find upcall result: */
	rsip = rsi_lookup(sn->rsi_cache, &rsikey);
	rsi_free(&rsikey);
	if (!rsip)
		return SVC_CLOSE;
	if (cache_check(sn->rsi_cache, &rsip->h, &rqstp->rq_chandle) < 0)
		/* No upcall result: */
		return SVC_CLOSE;

	ret = SVC_CLOSE;
	if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &rsip->out_handle,
					&rsip->major_status, GSS_SEQ_WIN))
		goto out;
	if (xdr_stream_encode_u32(&rqstp->rq_res_stream, RPC_SUCCESS) < 0)
		goto out;
	if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &rsip->out_handle,
					&rsip->out_token, rsip->major_status,
					rsip->minor_status, GSS_SEQ_WIN))
		goto out;

	ret = SVC_COMPLETE;
out:
	cache_put(&rsip->h, sn->rsi_cache);
	return ret;
}

/* Install a context accepted by gss-proxy into the rsc cache, under a
 * freshly generated handle that is also returned in @handle. */
static int gss_proxy_save_rsc(struct cache_detail *cd,
			      struct gssp_upcall_data *ud,
			      uint64_t *handle)
{
	struct rsc rsci, *rscp = NULL;
	static atomic64_t ctxhctr;
	long long ctxh;
	struct gss_api_mech *gm = NULL;
	time64_t expiry;
	int status;

	memset(&rsci, 0, sizeof(rsci));
	/* context handle */
	status = -ENOMEM;
	/* the handle needs to be just a unique id,
	 * use a static counter */
	ctxh = atomic64_inc_return(&ctxhctr);

	/* make a copy for the caller */
	*handle = ctxh;

	/* make a copy for the rsc cache */
	if (dup_to_netobj(&rsci.handle, (char *)handle, sizeof(uint64_t)))
		goto out;
	rscp = rsc_lookup(cd, &rsci);
	if (!rscp)
		goto out;

	/* creds */
	if (!ud->found_creds) {
		/* userspace seem buggy, we should always get at least a
		 * mapping to nobody */
		goto out;
	} else {
		struct timespec64 boot;

		/* steal creds */
		rsci.cred = ud->creds;
		memset(&ud->creds, 0, sizeof(struct svc_cred));

		status = -EOPNOTSUPP;
		/* get mech handle from OID */
		gm = gss_mech_get_by_OID(&ud->mech_oid);
		if (!gm)
			goto out;
		rsci.cred.cr_gss_mech = gm;

		status = -EINVAL;
		/* mech-specific data: */
		status = gss_import_sec_context(ud->out_handle.data,
						ud->out_handle.len,
						gm, &rsci.mechctx,
						&expiry, GFP_KERNEL);
		if (status)
			goto out;

		/* gss-proxy returns an absolute expiry; convert it to a
		 * boottime-relative value for the cache. */
		getboottime64(&boot);
		expiry -= boot.tv_sec;
	}

	rsci.h.expiry_time = expiry;
	rscp = rsc_update(cd, &rsci, rscp);
	status = 0;
out:
	rsc_free(&rsci);
	if (rscp)
		cache_put(&rscp->h, cd);
	else
		status = -ENOMEM;
	return status;
}

/* Handle RPCSEC_GSS_INIT / CONT_INIT by a synchronous upcall to the
 * gss-proxy daemon instead of the legacy cache-channel upcall. */
static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
				  struct rpc_gss_wire_cred *gc)
{
	struct xdr_netobj cli_handle;
	struct gssp_upcall_data ud;
	uint64_t handle;
	int status;
	int ret;
	struct net *net = SVC_NET(rqstp);
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	memset(&ud, 0, sizeof(ud));
	ret = gss_read_proxy_verf(rqstp, gc, &ud.in_handle, &ud.in_token);
	if (ret)
		return ret;

	ret = SVC_CLOSE;

	/* Perform synchronous upcall to gss-proxy */
	status = gssp_accept_sec_context_upcall(net, &ud);
	if (status)
		goto out;

	trace_rpcgss_svc_accept_upcall(rqstp, ud.major_status, ud.minor_status);

	switch (ud.major_status) {
	case GSS_S_CONTINUE_NEEDED:
		cli_handle = ud.out_handle;
		break;
	case GSS_S_COMPLETE:
		status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
		if (status)
			goto out;
		cli_handle.data = (u8 *)&handle;
		cli_handle.len = sizeof(handle);
		break;
	default:
		goto out;
	}

	if (!svcauth_gss_proc_init_verf(sn->rsc_cache, rqstp, &cli_handle,
					&ud.major_status, GSS_SEQ_WIN))
		goto out;
	if (xdr_stream_encode_u32(&rqstp->rq_res_stream, RPC_SUCCESS) < 0)
		goto out;
	if (!svcxdr_encode_gss_init_res(&rqstp->rq_res_stream, &cli_handle,
					&ud.out_token, ud.major_status,
					ud.minor_status, GSS_SEQ_WIN))
		goto out;

	ret = SVC_COMPLETE;
out:
	gss_free_in_token_pages(&ud.in_token);
	gssp_free_upcall_data(&ud);
	return ret;
}

/*
 * Try to set the sn->use_gss_proxy variable to a new value. We only allow
 * it to be changed if it's currently undefined (-1). If it's any other value
 * then return -EBUSY unless the type wouldn't have changed anyway.
1369 */ 1370 static int set_gss_proxy(struct net *net, int type) 1371 { 1372 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1373 int ret; 1374 1375 WARN_ON_ONCE(type != 0 && type != 1); 1376 ret = cmpxchg(&sn->use_gss_proxy, -1, type); 1377 if (ret != -1 && ret != type) 1378 return -EBUSY; 1379 return 0; 1380 } 1381 1382 static bool use_gss_proxy(struct net *net) 1383 { 1384 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1385 1386 /* If use_gss_proxy is still undefined, then try to disable it */ 1387 if (sn->use_gss_proxy == -1) 1388 set_gss_proxy(net, 0); 1389 return sn->use_gss_proxy; 1390 } 1391 1392 static noinline_for_stack int 1393 svcauth_gss_proc_init(struct svc_rqst *rqstp, struct rpc_gss_wire_cred *gc) 1394 { 1395 struct xdr_stream *xdr = &rqstp->rq_arg_stream; 1396 u32 flavor, len; 1397 void *body; 1398 1399 /* Call's verf field: */ 1400 if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0) 1401 return SVC_GARBAGE; 1402 if (flavor != RPC_AUTH_NULL || len != 0) { 1403 rqstp->rq_auth_stat = rpc_autherr_badverf; 1404 return SVC_DENIED; 1405 } 1406 1407 if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0) { 1408 rqstp->rq_auth_stat = rpc_autherr_badcred; 1409 return SVC_DENIED; 1410 } 1411 1412 if (!use_gss_proxy(SVC_NET(rqstp))) 1413 return svcauth_gss_legacy_init(rqstp, gc); 1414 return svcauth_gss_proxy_init(rqstp, gc); 1415 } 1416 1417 #ifdef CONFIG_PROC_FS 1418 1419 static ssize_t write_gssp(struct file *file, const char __user *buf, 1420 size_t count, loff_t *ppos) 1421 { 1422 struct net *net = pde_data(file_inode(file)); 1423 char tbuf[20]; 1424 unsigned long i; 1425 int res; 1426 1427 if (*ppos || count > sizeof(tbuf)-1) 1428 return -EINVAL; 1429 if (copy_from_user(tbuf, buf, count)) 1430 return -EFAULT; 1431 1432 tbuf[count] = 0; 1433 res = kstrtoul(tbuf, 0, &i); 1434 if (res) 1435 return res; 1436 if (i != 1) 1437 return -EINVAL; 1438 res = set_gssp_clnt(net); 1439 if (res) 1440 return res; 1441 res = 
set_gss_proxy(net, 1); 1442 if (res) 1443 return res; 1444 return count; 1445 } 1446 1447 static ssize_t read_gssp(struct file *file, char __user *buf, 1448 size_t count, loff_t *ppos) 1449 { 1450 struct net *net = pde_data(file_inode(file)); 1451 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1452 unsigned long p = *ppos; 1453 char tbuf[10]; 1454 size_t len; 1455 1456 snprintf(tbuf, sizeof(tbuf), "%d\n", sn->use_gss_proxy); 1457 len = strlen(tbuf); 1458 if (p >= len) 1459 return 0; 1460 len -= p; 1461 if (len > count) 1462 len = count; 1463 if (copy_to_user(buf, (void *)(tbuf+p), len)) 1464 return -EFAULT; 1465 *ppos += len; 1466 return len; 1467 } 1468 1469 static const struct proc_ops use_gss_proxy_proc_ops = { 1470 .proc_open = nonseekable_open, 1471 .proc_write = write_gssp, 1472 .proc_read = read_gssp, 1473 }; 1474 1475 static int create_use_gss_proxy_proc_entry(struct net *net) 1476 { 1477 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1478 struct proc_dir_entry **p = &sn->use_gssp_proc; 1479 1480 sn->use_gss_proxy = -1; 1481 *p = proc_create_data("use-gss-proxy", S_IFREG | 0600, 1482 sn->proc_net_rpc, 1483 &use_gss_proxy_proc_ops, net); 1484 if (!*p) 1485 return -ENOMEM; 1486 init_gssp_clnt(sn); 1487 return 0; 1488 } 1489 1490 static void destroy_use_gss_proxy_proc_entry(struct net *net) 1491 { 1492 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1493 1494 if (sn->use_gssp_proc) { 1495 remove_proc_entry("use-gss-proxy", sn->proc_net_rpc); 1496 clear_gssp_clnt(sn); 1497 } 1498 } 1499 #else /* CONFIG_PROC_FS */ 1500 1501 static int create_use_gss_proxy_proc_entry(struct net *net) 1502 { 1503 return 0; 1504 } 1505 1506 static void destroy_use_gss_proxy_proc_entry(struct net *net) {} 1507 1508 #endif /* CONFIG_PROC_FS */ 1509 1510 /* 1511 * The Call's credential body should contain a struct rpc_gss_cred_t. 
1512 * 1513 * RFC 2203 Section 5 1514 * 1515 * struct rpc_gss_cred_t { 1516 * union switch (unsigned int version) { 1517 * case RPCSEC_GSS_VERS_1: 1518 * struct { 1519 * rpc_gss_proc_t gss_proc; 1520 * unsigned int seq_num; 1521 * rpc_gss_service_t service; 1522 * opaque handle<>; 1523 * } rpc_gss_cred_vers_1_t; 1524 * } 1525 * }; 1526 */ 1527 static bool 1528 svcauth_gss_decode_credbody(struct xdr_stream *xdr, 1529 struct rpc_gss_wire_cred *gc, 1530 __be32 **rpcstart) 1531 { 1532 ssize_t handle_len; 1533 u32 body_len; 1534 __be32 *p; 1535 1536 p = xdr_inline_decode(xdr, XDR_UNIT); 1537 if (!p) 1538 return false; 1539 /* 1540 * start of rpc packet is 7 u32's back from here: 1541 * xid direction rpcversion prog vers proc flavour 1542 */ 1543 *rpcstart = p - 7; 1544 body_len = be32_to_cpup(p); 1545 if (body_len > RPC_MAX_AUTH_SIZE) 1546 return false; 1547 1548 /* struct rpc_gss_cred_t */ 1549 if (xdr_stream_decode_u32(xdr, &gc->gc_v) < 0) 1550 return false; 1551 if (xdr_stream_decode_u32(xdr, &gc->gc_proc) < 0) 1552 return false; 1553 if (xdr_stream_decode_u32(xdr, &gc->gc_seq) < 0) 1554 return false; 1555 if (xdr_stream_decode_u32(xdr, &gc->gc_svc) < 0) 1556 return false; 1557 handle_len = xdr_stream_decode_opaque_inline(xdr, 1558 (void **)&gc->gc_ctx.data, 1559 body_len); 1560 if (handle_len < 0) 1561 return false; 1562 if (body_len != XDR_UNIT * 5 + xdr_align_size(handle_len)) 1563 return false; 1564 1565 gc->gc_ctx.len = handle_len; 1566 return true; 1567 } 1568 1569 /** 1570 * svcauth_gss_accept - Decode and validate incoming RPC_AUTH_GSS credential 1571 * @rqstp: RPC transaction 1572 * 1573 * Return values: 1574 * %SVC_OK: Success 1575 * %SVC_COMPLETE: GSS context lifetime event 1576 * %SVC_DENIED: Credential or verifier is not valid 1577 * %SVC_GARBAGE: Failed to decode credential or verifier 1578 * %SVC_CLOSE: Temporary failure 1579 * 1580 * The rqstp->rq_auth_stat field is also set (see RFCs 2203 and 5531). 
 */
static int
svcauth_gss_accept(struct svc_rqst *rqstp)
{
	struct gss_svc_data *svcdata = rqstp->rq_auth_data;
	__be32 *rpcstart;
	struct rpc_gss_wire_cred *gc;
	struct rsc *rsci = NULL;
	int ret;
	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);

	rqstp->rq_auth_stat = rpc_autherr_badcred;
	/* Per-request auth data is allocated lazily and then reused;
	 * presumably it is freed with the svc_rqst elsewhere — not
	 * visible in this file section. */
	if (!svcdata)
		svcdata = kmalloc(sizeof(*svcdata), GFP_KERNEL);
	if (!svcdata)
		goto auth_err;
	rqstp->rq_auth_data = svcdata;
	svcdata->verf_start = NULL;
	svcdata->rsci = NULL;
	gc = &svcdata->clcred;

	if (!svcauth_gss_decode_credbody(&rqstp->rq_arg_stream, gc, &rpcstart))
		goto auth_err;
	if (gc->gc_v != RPC_GSS_VERSION)
		goto auth_err;

	/* First pass: validate the request against its gc_proc and, for
	 * DESTROY/DATA, look up and verify the security context. */
	switch (gc->gc_proc) {
	case RPC_GSS_PROC_INIT:
	case RPC_GSS_PROC_CONTINUE_INIT:
		if (rqstp->rq_proc != 0)
			goto auth_err;
		return svcauth_gss_proc_init(rqstp, gc);
	case RPC_GSS_PROC_DESTROY:
		if (rqstp->rq_proc != 0)
			goto auth_err;
		fallthrough;
	case RPC_GSS_PROC_DATA:
		rqstp->rq_auth_stat = rpcsec_gsserr_credproblem;
		rsci = gss_svc_searchbyctx(sn->rsc_cache, &gc->gc_ctx);
		if (!rsci)
			goto auth_err;
		switch (svcauth_gss_verify_header(rqstp, rsci, rpcstart, gc)) {
		case SVC_OK:
			break;
		case SVC_DENIED:
			goto auth_err;
		case SVC_DROP:
			goto drop;
		}
		break;
	default:
		if (rqstp->rq_proc != 0)
			goto auth_err;
		rqstp->rq_auth_stat = rpc_autherr_rejectedcred;
		goto auth_err;
	}

	/* now act upon the command: */
	switch (gc->gc_proc) {
	case RPC_GSS_PROC_DESTROY:
		if (!svcauth_gss_encode_verf(rqstp, rsci->mechctx, gc->gc_seq))
			goto auth_err;
		/* Delete the entry from the cache_list and call cache_put */
		sunrpc_cache_unhash(sn->rsc_cache, &rsci->h);
		if (xdr_stream_encode_u32(&rqstp->rq_res_stream, RPC_SUCCESS) < 0)
			goto auth_err;
		goto complete;
	case RPC_GSS_PROC_DATA:
		rqstp->rq_auth_stat = rpcsec_gsserr_ctxproblem;
		/* Remember where the reply's verifier begins so that
		 * svcauth_gss_release() can wrap the response later. */
		svcdata->verf_start = xdr_reserve_space(&rqstp->rq_res_stream, 0);
		if (!svcauth_gss_encode_verf(rqstp, rsci->mechctx, gc->gc_seq))
			goto auth_err;
		rqstp->rq_cred = rsci->cred;
		get_group_info(rsci->cred.cr_group_info);
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		switch (gc->gc_svc) {
		case RPC_GSS_SVC_NONE:
			break;
		case RPC_GSS_SVC_INTEGRITY:
			/* placeholders for body length and seq. number: */
			xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
			if (svcauth_gss_unwrap_integ(rqstp, gc->gc_seq,
						     rsci->mechctx))
				goto garbage_args;
			svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE);
			break;
		case RPC_GSS_SVC_PRIVACY:
			/* placeholders for body length and seq. number: */
			xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2);
			if (svcauth_gss_unwrap_priv(rqstp, gc->gc_seq,
						    rsci->mechctx))
				goto garbage_args;
			svcxdr_set_auth_slack(rqstp, RPC_MAX_AUTH_SIZE * 2);
			break;
		default:
			goto auth_err;
		}
		/* Hold the context for the duration of the request;
		 * released in svcauth_gss_release(). */
		svcdata->rsci = rsci;
		cache_get(&rsci->h);
		rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
					rsci->mechctx->mech_type,
					GSS_C_QOP_DEFAULT,
					gc->gc_svc);
		ret = SVC_OK;
		trace_rpcgss_svc_authenticate(rqstp, gc);
		goto out;
	}
garbage_args:
	ret = SVC_GARBAGE;
	goto out;
auth_err:
	/* Reset the reply to just xid + direction words before the
	 * upper layer encodes the auth error. */
	xdr_truncate_encode(&rqstp->rq_res_stream, XDR_UNIT * 2);
	ret = SVC_DENIED;
	goto out;
complete:
	ret = SVC_COMPLETE;
	goto out;
drop:
	ret = SVC_CLOSE;
out:
	if (rsci)
		cache_put(&rsci->h, sn->rsc_cache);
	return ret;
}

/*
 * Locate the start of the RPC Reply body (just past the verifier and
 * accept_stat), or return NULL when the reply must not be wrapped
 * (auth error or non-zero accept_stat). Consumes gsd->verf_start so
 * a second release of the same request is a no-op.
 */
static __be32 *
svcauth_gss_prepare_to_wrap(struct svc_rqst *rqstp, struct gss_svc_data *gsd)
{
	struct xdr_buf *resbuf = &rqstp->rq_res;
	__be32 *p;
	u32 verf_len;

	p = gsd->verf_start;
	gsd->verf_start = NULL;

	/* AUTH_ERROR replies are not wrapped. */
	if (rqstp->rq_auth_stat != rpc_auth_ok)
		return NULL;

	/* Skip the verifier: */
	p += 1;
	verf_len = ntohl(*p++);
	p += XDR_QUADLEN(verf_len);
	/* move accept_stat to right place: */
	memcpy(p, p + 2, 4);
	/* Also don't wrap if the accept stat is nonzero: */
	if (*p != rpc_success) {
		resbuf->head[0].iov_len -= 2 * 4;
		return NULL;
	}
	p++;
	return p;
}

/*
 * RFC 2203, Section 5.3.2.2
 *
 *	struct rpc_gss_integ_data {
 *		opaque databody_integ<>;
 *		opaque checksum<>;
 *	};
 *
 *	struct rpc_gss_data_t {
 *		unsigned int seq_num;
 *		proc_req_arg_t arg;
 *	};
 *
 * The RPC Reply message has already been XDR-encoded. rq_res_stream
 * is now positioned so that the checksum can be written just past
 * the RPC Reply message.
 */
static int svcauth_gss_wrap_integ(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = rqstp->rq_auth_data;
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *buf = xdr->buf;
	struct xdr_buf databody_integ;
	struct xdr_netobj checksum;
	u32 offset, len, maj_stat;
	__be32 *p;

	p = svcauth_gss_prepare_to_wrap(rqstp, gsd);
	if (p == NULL)
		goto out;

	/* databody_integ covers seq_num + the already-encoded results. */
	offset = (u8 *)(p + 1) - (u8 *)buf->head[0].iov_base;
	len = buf->len - offset;
	if (xdr_buf_subsegment(buf, &databody_integ, offset, len))
		goto wrap_failed;
	/* Buffer space for these has already been reserved in
	 * svcauth_gss_accept(). */
	*p++ = cpu_to_be32(len);
	*p = cpu_to_be32(gc->gc_seq);

	/* gsd_scratch is sized for the largest supported MIC. */
	checksum.data = gsd->gsd_scratch;
	maj_stat = gss_get_mic(gsd->rsci->mechctx, &databody_integ, &checksum);
	if (maj_stat != GSS_S_COMPLETE)
		goto bad_mic;

	if (xdr_stream_encode_opaque(xdr, checksum.data, checksum.len) < 0)
		goto wrap_failed;
	xdr_commit_encode(xdr);

out:
	return 0;

bad_mic:
	trace_rpcgss_svc_get_mic(rqstp, maj_stat);
	return -EINVAL;
wrap_failed:
	trace_rpcgss_svc_wrap_failed(rqstp);
	return -EINVAL;
}

/*
 * RFC 2203, Section 5.3.2.3
 *
 *	struct rpc_gss_priv_data {
 *		opaque databody_priv<>
 *	};
 *
 *	struct rpc_gss_data_t {
 *		unsigned int seq_num;
 *		proc_req_arg_t arg;
 *	};
 *
 * gss_wrap() expands the size of the RPC message payload in the
 * response buffer. The main purpose of svcauth_gss_wrap_priv() is
 * to ensure there is adequate space in the response buffer to
 * avoid overflow during the wrap.
 */
static int svcauth_gss_wrap_priv(struct svc_rqst *rqstp)
{
	struct gss_svc_data *gsd = rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc = &gsd->clcred;
	struct xdr_buf *buf = &rqstp->rq_res;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	u32 offset, pad, maj_stat;
	__be32 *p, *lenp;

	p = svcauth_gss_prepare_to_wrap(rqstp, gsd);
	if (p == NULL)
		return 0;

	/* The databody_priv length is not known until after gss_wrap();
	 * remember where it goes and fill it in at the end. */
	lenp = p++;
	offset = (u8 *)p - (u8 *)head->iov_base;
	/* Buffer space for this field has already been reserved
	 * in svcauth_gss_accept(). */
	*p = cpu_to_be32(gc->gc_seq);

	/*
	 * If there is currently tail data, make sure there is
	 * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in
	 * the page, and move the current tail data such that
	 * there is RPC_MAX_AUTH_SIZE slack space available in
	 * both the head and tail.
	 */
	if (tail->iov_base) {
		if (tail->iov_base >= head->iov_base + PAGE_SIZE)
			goto wrap_failed;
		if (tail->iov_base < head->iov_base)
			goto wrap_failed;
		if (tail->iov_len + head->iov_len
				+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			goto wrap_failed;
		memmove(tail->iov_base + RPC_MAX_AUTH_SIZE, tail->iov_base,
			tail->iov_len);
		tail->iov_base += RPC_MAX_AUTH_SIZE;
	}
	/*
	 * If there is no current tail data, make sure there is
	 * room for the head data, and 2 * RPC_MAX_AUTH_SIZE in the
	 * allotted page, and set up tail information such that there
	 * is RPC_MAX_AUTH_SIZE slack space available in both the
	 * head and tail.
	 */
	if (!tail->iov_base) {
		if (head->iov_len + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
			goto wrap_failed;
		tail->iov_base = head->iov_base
			+ head->iov_len + RPC_MAX_AUTH_SIZE;
		tail->iov_len = 0;
	}

	maj_stat = gss_wrap(gsd->rsci->mechctx, offset, buf, buf->pages);
	if (maj_stat != GSS_S_COMPLETE)
		goto bad_wrap;

	/* Backfill the databody_priv length and pad it to an XDR
	 * quad boundary. */
	*lenp = cpu_to_be32(buf->len - offset);
	pad = xdr_pad_size(buf->len - offset);
	p = (__be32 *)(tail->iov_base + tail->iov_len);
	memset(p, 0, pad);
	tail->iov_len += pad;
	buf->len += pad;

	return 0;
wrap_failed:
	trace_rpcgss_svc_wrap_failed(rqstp);
	return -EINVAL;
bad_wrap:
	trace_rpcgss_svc_wrap(rqstp, maj_stat);
	return -ENOMEM;
}

/**
 * svcauth_gss_release - Wrap payload and release resources
 * @rqstp: RPC transaction context
 *
 * Return values:
 *   %0: the Reply is ready to be sent
 *   %-ENOMEM: failed to allocate memory
 *   %-EINVAL: encoding error
 *
 * XXX: These return values do not match the return values documented
 * for the auth_ops ->release method in linux/sunrpc/svcauth.h.
 */
static int
svcauth_gss_release(struct svc_rqst *rqstp)
{
	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
	struct gss_svc_data *gsd = rqstp->rq_auth_data;
	struct rpc_gss_wire_cred *gc;
	int stat;

	if (!gsd)
		goto out;
	gc = &gsd->clcred;
	if (gc->gc_proc != RPC_GSS_PROC_DATA)
		goto out;
	/* Release can be called twice, but we only wrap once. */
	if (gsd->verf_start == NULL)
		goto out;

	switch (gc->gc_svc) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		stat = svcauth_gss_wrap_integ(rqstp);
		if (stat)
			goto out_err;
		break;
	case RPC_GSS_SVC_PRIVACY:
		stat = svcauth_gss_wrap_priv(rqstp);
		if (stat)
			goto out_err;
		break;
	/*
	 * For any other gc_svc value, svcauth_gss_accept() already set
	 * the auth_error appropriately; just fall through:
	 */
	}

out:
	/* Deliberate fall-through: success paths set stat = 0 here and
	 * then share the cleanup below with the error paths. */
	stat = 0;
out_err:
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_gssclient)
		auth_domain_put(rqstp->rq_gssclient);
	rqstp->rq_gssclient = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;
	/* Drop the context reference taken in svcauth_gss_accept(). */
	if (gsd && gsd->rsci) {
		cache_put(&gsd->rsci->h, sn->rsc_cache);
		gsd->rsci = NULL;
	}
	return stat;
}

/* RCU callback: free the auth_domain (and its name) once all readers
 * are done with it. */
static void
svcauth_gss_domain_release_rcu(struct rcu_head *head)
{
	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
	struct gss_domain *gd = container_of(dom, struct gss_domain, h);

	kfree(dom->name);
	kfree(gd);
}

static void
svcauth_gss_domain_release(struct auth_domain *dom)
{
	call_rcu(&dom->rcu_head, svcauth_gss_domain_release_rcu);
}

static struct auth_ops svcauthops_gss = {
	.name		= "rpcsec_gss",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_GSS,
	.accept		= svcauth_gss_accept,
	.release	= svcauth_gss_release,
	.domain_release	= svcauth_gss_domain_release,
	.set_client	= svcauth_gss_set_client,
};

/* Create and register this net namespace's rsi (init upcall) cache. */
static int rsi_cache_create_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&rsi_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->rsi_cache = cd;
	return 0;
}

static void rsi_cache_destroy_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->rsi_cache;

	sn->rsi_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

/* Create and register this net namespace's rsc (security context) cache. */
static int rsc_cache_create_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&rsc_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->rsc_cache = cd;
	return 0;
}

static void rsc_cache_destroy_net(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->rsc_cache;

	sn->rsc_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

/* Per-net setup: rsc cache, rsi cache, then the use-gss-proxy proc
 * knob; unwind in reverse order on failure. */
int
gss_svc_init_net(struct net *net)
{
	int rv;

	rv = rsc_cache_create_net(net);
	if (rv)
		return rv;
	rv = rsi_cache_create_net(net);
	if (rv)
		goto out1;
	rv = create_use_gss_proxy_proc_entry(net);
	if (rv)
		goto out2;
	return 0;
out2:
	rsi_cache_destroy_net(net);
out1:
	rsc_cache_destroy_net(net);
	return rv;
}

void
gss_svc_shutdown_net(struct net *net)
{
	destroy_use_gss_proxy_proc_entry(net);
	rsi_cache_destroy_net(net);
	rsc_cache_destroy_net(net);
}

int
gss_svc_init(void)
{
	return svc_auth_register(RPC_AUTH_GSS, &svcauthops_gss);
}

void
gss_svc_shutdown(void)
{
	svc_auth_unregister(RPC_AUTH_GSS);
}