/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song <dugsong@monkey.org>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
#include <linux/hashtable.h>

#include "../netns.h"

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#define GSS_KEY_EXPIRE_TIMEO 240
static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
static DEFINE_SPINLOCK(gss_auth_hash_lock);

struct gss_pipe {
	struct rpc_pipe_dir_object pdo;
	struct rpc_pipe *pipe;
	struct rpc_clnt *clnt;
	const char *name;
	struct kref kref;
};

struct gss_auth {
	struct kref kref;
	struct hlist_node hash;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct net *net;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct gss_pipe *gss_pipe[2];
	const char *target_name;
};

/* pipe_version >= 0 if and only if someone has a pipe open. */
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count, 1);
	}
	return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
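/*
 * Parse the context portion of a gssd downcall. The buffer holds, in
 * order: the remaining credential lifetime (unsigned int, in seconds),
 * the sequence window size (u32), the opaque wire context handle
 * (length-prefixed netobj), and the mechanism-specific security context
 * blob that is handed to gss_import_sec_context().
 */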
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g. the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u\n",
		__func__, ctx->gc_expiry, now, timeout);
	return q;
err:
	dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
	return p;
}

#define UPCALL_BUF_LEN 128

struct gss_upcall_msg {
	atomic_t count;
	kuid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};

static int get_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version >= 0) {
		atomic_inc(&sn->pipe_users);
		ret = sn->pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
		sn->pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	struct net *net = gss_msg->auth->net;
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version(net);
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (!uid_eq(pos->uid, uid))
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: %s found msg %p\n", __func__, pos);
		return pos;
	}
	dprintk("RPC: %s found nothing\n", __func__);
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;
	struct gss_upcall_msg *old;

	spin_lock(&pipe->lock);
	old = __gss_find_upcall(pipe, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &pipe->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&pipe->lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}

static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	gss_release_msg(gss_msg);
}

static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
	uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);
	memcpy(gss_msg->databuf, &uid, sizeof(uid));
	gss_msg->msg.data = gss_msg->databuf;
	gss_msg->msg.len = sizeof(uid);

	BUILD_BUG_ON(sizeof(uid) > sizeof(gss_msg->databuf));
}
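/*
 * Format a text-based (v1) upcall message for gssd: a single line of
 * space-separated "key=value" pairs terminated by a newline, e.g.
 *
 *	mech=krb5 uid=1000 [target=<name>] [service=<name>] [enctypes=<list>]
 *
 * "mech" and "uid" are always present; the optional fields are emitted
 * only when the corresponding data is available.
 */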
static int gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				const char *service_name,
				const char *target_name)
{
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	size_t buflen = sizeof(gss_msg->databuf);
	int len;

	len = scnprintf(p, buflen, "mech=%s uid=%d ", mech->gm_name,
			from_kuid(&init_user_ns, gss_msg->uid));
	buflen -= len;
	p += len;
	gss_msg->msg.len = len;
	if (target_name) {
		len = scnprintf(p, buflen, "target=%s ", target_name);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	if (service_name != NULL) {
		len = scnprintf(p, buflen, "service=%s ", service_name);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	if (mech->gm_upcall_enctypes) {
		len = scnprintf(p, buflen, "enctypes=%s ",
				mech->gm_upcall_enctypes);
		buflen -= len;
		p += len;
		gss_msg->msg.len += len;
	}
	len = scnprintf(p, buflen, "\n");
	if (len == 0)
		goto out_overflow;
	gss_msg->msg.len += len;

	gss_msg->msg.data = gss_msg->databuf;
	return 0;
out_overflow:
	WARN_ON_ONCE(1);
	return -ENOMEM;
}

static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth,
		kuid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;
	int err = -ENOMEM;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		goto err;
	vers = get_pipe_version(gss_auth->net);
	err = vers;
	if (err < 0)
		goto err_free_msg;
	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	switch (vers) {
	case 0:
		gss_encode_v0_msg(gss_msg);
		break;
	default:
		err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
		if (err)
			goto err_free_msg;
	}
	return gss_msg;
err_free_msg:
	kfree(gss_msg);
err:
	return ERR_PTR(err);
}

static struct gss_upcall_msg *
gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	kuid_t uid = cred->cr_uid;

	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}

static void warn_gssd(void)
{
	dprintk("AUTH_GSS upcall failed. Please check user daemon is running.\n");
}

static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	dprintk("RPC: %5u %s for uid %u\n",
		task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh. */
		warn_gssd();
		task->tk_timeout = 15*HZ;
		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
		return -EAGAIN;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	spin_lock(&pipe->lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else {
		gss_handle_downcall_result(gss_cred, gss_msg);
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u %s for uid %u result %d\n",
		task->tk_pid, __func__,
		from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct net *net = gss_auth->net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_pipe *pipe;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	DEFINE_WAIT(wait);
	int err;

	dprintk("RPC: %s for uid %u\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid));
retry:
	err = 0;
	/* if gssd is down, just skip upcalling altogether */
	if (!gssd_running(net)) {
		warn_gssd();
		return -EACCES;
	}
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				sn->pipe_version >= 0, 15 * HZ);
		if (sn->pipe_version < 0) {
			warn_gssd();
			err = -EACCES;
		}
		if (err < 0)
			goto out;
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
		spin_lock(&pipe->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&pipe->lock);
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&pipe->lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %s for uid %u result %d\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

#define MSG_BUF_MAXSIZE 1024
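/*
 * Handle a downcall written by gssd: the message starts with the uid the
 * upcall was made for, followed by the context data parsed by
 * gss_fill_context(). On success the matching upcall is completed and the
 * full message length is returned; errors reported by gssd (-EACCES,
 * -EKEYEXPIRED) are passed through to the waiting upcall.
 */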
static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
	struct gss_cl_ctx *ctx;
	uid_t id;
	kuid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &id, sizeof(id));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	uid = make_kuid(&init_user_ns, id);
	if (!uid_valid(uid)) {
		err = -EINVAL;
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&pipe->lock);
	gss_msg = __gss_find_upcall(pipe, uid);
	if (gss_msg == NULL) {
		spin_unlock(&pipe->lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&pipe->lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		switch (err) {
		case -EACCES:
		case -EKEYEXPIRED:
			gss_msg->msg.errno = err;
			err = mlen;
			break;
		case -EFAULT:
		case -ENOMEM:
		case -EINVAL:
		case -ENOSYS:
			gss_msg->msg.errno = -EAGAIN;
			break;
		default:
			printk(KERN_CRIT "%s: bad return from "
				"gss_fill_context: %zd\n", __func__, err);
			BUG();
		}
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&pipe->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: %s returning %Zd\n", __func__, err);
	return err;
}

static int gss_pipe_open(struct inode *inode, int new_version)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		sn->pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (sn->pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&sn->pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;

}

static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}

static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}

static void
gss_pipe_release(struct inode *inode)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	struct gss_upcall_msg *gss_msg;

restart:
	spin_lock(&pipe->lock);
	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {

		if (!list_empty(&gss_msg->msg.list))
			continue;
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&pipe->lock);
		gss_release_msg(gss_msg);
		goto restart;
	}
	spin_unlock(&pipe->lock);

	put_pipe_version(net);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		dprintk("RPC: %s releasing msg %p\n",
			__func__, gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
}

static void gss_pipe_dentry_destroy(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *gss_pipe = pdo->pdo_data;
	struct rpc_pipe *pipe = gss_pipe->pipe;

	if (pipe->dentry != NULL) {
		rpc_unlink(pipe->dentry);
		pipe->dentry = NULL;
	}
}

static int gss_pipe_dentry_create(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *p = pdo->pdo_data;
	struct dentry *dentry;

	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	p->pipe->dentry = dentry;
	return 0;
}

static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
	.create = gss_pipe_dentry_create,
	.destroy = gss_pipe_dentry_destroy,
};

static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
		const char *name,
		const struct rpc_pipe_ops *upcall_ops)
{
	struct gss_pipe *p;
	int err = -ENOMEM;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		goto err;
	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(p->pipe)) {
		err = PTR_ERR(p->pipe);
		goto err_free_gss_pipe;
	}
	p->name = name;
	p->clnt = clnt;
	kref_init(&p->kref);
	rpc_init_pipe_dir_object(&p->pdo,
			&gss_pipe_dir_object_ops,
			p);
	return p;
err_free_gss_pipe:
	kfree(p);
err:
	return ERR_PTR(err);
}

struct gss_alloc_pdo {
	struct rpc_clnt *clnt;
	const char *name;
	const struct rpc_pipe_ops *upcall_ops;
};

static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
		return 0;
	gss_pipe = container_of(pdo, struct gss_pipe, pdo);
	if (strcmp(gss_pipe->name, args->name) != 0)
		return 0;
	if (!kref_get_unless_zero(&gss_pipe->kref))
		return 0;
	return 1;
}

static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
	if (!IS_ERR(gss_pipe))
		return &gss_pipe->pdo;
	return NULL;
}

static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
		const char *name,
		const struct rpc_pipe_ops *upcall_ops)
{
	struct net *net = rpc_net_ns(clnt);
	struct rpc_pipe_dir_object *pdo;
	struct gss_alloc_pdo args = {
		.clnt = clnt,
		.name = name,
		.upcall_ops = upcall_ops,
	};

	pdo = rpc_find_or_alloc_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			gss_pipe_match_pdo,
			gss_pipe_alloc_pdo,
			&args);
	if (pdo != NULL)
		return container_of(pdo, struct gss_pipe, pdo);
	return ERR_PTR(-ENOMEM);
}

static void __gss_pipe_free(struct gss_pipe *p)
{
	struct rpc_clnt *clnt = p->clnt;
	struct net *net = rpc_net_ns(clnt);

	rpc_remove_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			&p->pdo);
	rpc_destroy_pipe_data(p->pipe);
	kfree(p);
}

static void __gss_pipe_release(struct kref *kref)
{
	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);

	__gss_pipe_free(p);
}

static void gss_pipe_free(struct gss_pipe *p)
{
	if (p != NULL)
		kref_put(&p->kref, __gss_pipe_release);
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct gss_auth *
gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	rpc_authflavor_t flavor = args->pseudoflavor;
	struct gss_auth *gss_auth;
	struct gss_pipe *gss_pipe;
	struct rpc_auth *auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	INIT_HLIST_NODE(&gss_auth->hash);
	gss_auth->target_name = NULL;
	if (args->target_name) {
		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
		if (gss_auth->target_name == NULL)
			goto err_free;
	}
	gss_auth->client = clnt;
	gss_auth->net = get_net(rpc_net_ns(clnt));
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		dprintk("RPC: Pseudoflavor %d not found!\n", flavor);
		goto err_put_net;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_put_mech;
	/*
	 * Note: if we created the old pipe first, then someone who
	 * examined the directory at the right moment might conclude
	 * that we supported only the old pipe. So we instead create
	 * the new pipe first.
	 */
	gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_credcache;
	}
	gss_auth->gss_pipe[1] = gss_pipe;

	gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
			&gss_upcall_ops_v0);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_pipe_1;
	}
	gss_auth->gss_pipe[0] = gss_pipe;

	return gss_auth;
err_destroy_pipe_1:
	gss_pipe_free(gss_auth->gss_pipe[1]);
err_destroy_credcache:
	rpcauth_destroy_credcache(auth);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_put_net:
	put_net(gss_auth->net);
err_free:
	kfree(gss_auth->target_name);
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}

static void
gss_free(struct gss_auth *gss_auth)
{
	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_mech_put(gss_auth->mech);
	put_net(gss_auth->net);
	kfree(gss_auth->target_name);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}

static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth = container_of(auth,
			struct gss_auth, rpc_auth);

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
		auth, auth->au_flavor);

	if (hash_hashed(&gss_auth->hash)) {
		spin_lock(&gss_auth_hash_lock);
		hash_del(&gss_auth->hash);
		spin_unlock(&gss_auth_hash_lock);
	}

	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_auth->gss_pipe[0] = NULL;
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_auth->gss_pipe[1] = NULL;
	rpcauth_destroy_credcache(auth);

	kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * Auths may be shared between rpc clients that were cloned from a
 * common client with the same xprt, if they also share the flavor and
 * target_name.
 *
 * The auth is looked up from the oldest parent sharing the same
 * cl_xprt, and the auth itself references only that common parent
 * (which is guaranteed to last as long as any of its descendants).
 */
static struct gss_auth *
gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
		struct rpc_clnt *clnt,
		struct gss_auth *new)
{
	struct gss_auth *gss_auth;
	unsigned long hashval = (unsigned long)clnt;

	spin_lock(&gss_auth_hash_lock);
	hash_for_each_possible(gss_auth_hash_table,
			gss_auth,
			hash,
			hashval) {
		if (gss_auth->client != clnt)
			continue;
		if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
			continue;
		if (gss_auth->target_name != args->target_name) {
			if (gss_auth->target_name == NULL)
				continue;
			if (args->target_name == NULL)
				continue;
			if (strcmp(gss_auth->target_name, args->target_name))
				continue;
		}
		if (!atomic_inc_not_zero(&gss_auth->rpc_auth.au_count))
			continue;
		goto out;
	}
	if (new)
		hash_add(gss_auth_hash_table, &new->hash, hashval);
	gss_auth = new;
out:
	spin_unlock(&gss_auth_hash_lock);
	return gss_auth;
}

static struct gss_auth *
gss_create_hashed(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct gss_auth *new;

	gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
	if (gss_auth != NULL)
		goto out;
	new = gss_create_new(args, clnt);
	if (IS_ERR(new))
		return new;
	gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
	if (gss_auth != new)
		gss_destroy(&new->rpc_auth);
out:
	return gss_auth;
}

static struct rpc_auth *
gss_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct rpc_xprt *xprt = rcu_access_pointer(clnt->cl_xprt);

	while (clnt != clnt->cl_parent) {
		struct rpc_clnt *parent = clnt->cl_parent;
		/* Find the original parent for this transport */
		if (rcu_access_pointer(parent->cl_xprt) != xprt)
			break;
		clnt = parent;
	}

	gss_auth = gss_create_hashed(args, clnt);
	if (IS_ERR(gss_auth))
		return ERR_CAST(gss_auth);
	return &gss_auth->rpc_auth;
}

/*
 * gss_destroying_context will cause the RPCSEC_GSS client to send a NULL
 * RPC call to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct rpc_task *task;

	if (gss_cred->gc_ctx == NULL ||
	    test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
		return 0;

	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: %s\n", __func__);

	gss_delete_sec_context(&ctx->gc_gss_ctx);
	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: %s cred=%p\n", __func__, gss_cred);
	kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}

static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	kref_put(&gss_auth->kref, gss_free_callback);
}

static void
gss_destroy_cred(struct rpc_cred *cred)
{

	if (gss_destroying_context(cred))
		return;
	gss_destroy_nullcred(cred);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: %s for uid %d, flavor %d\n",
		__func__, from_kuid(&init_user_ns, acred->uid),
		auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_principal = NULL;
	if (acred->machine_cred)
		cred->gc_principal = acred->principal;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: %s failed with error %d\n", __func__, err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

/*
 * Returns -EACCES if the GSS context is NULL or will expire within the
 * timeout (in seconds)
 */
static int
gss_key_timeout(struct rpc_cred *rc)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	unsigned long now = jiffies;
	unsigned long expire;

	if (gss_cred->gc_ctx == NULL)
		return -EACCES;

	expire = gss_cred->gc_ctx->gc_expiry - (gss_key_expire_timeo * HZ);

	if (time_after(now, expire))
		return -EACCES;
	return 0;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	int ret;

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
		goto check_expire;
	}
	if (gss_cred->gc_principal != NULL)
		return 0;
	ret = uid_eq(rc->cr_uid, acred->uid);

check_expire:
	if (ret == 0)
		return ret;

	/* Notify acred users of GSS context expiration timeout */
	if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags) &&
	    (gss_key_timeout(rc) != 0)) {
		/* test will now be done from generic cred */
		test_and_clear_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
		/* tell NFS layer that key will expire soon */
		set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
	}
	return ret;
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
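/*
 * The credential body built below follows the RPCSEC_GSS layout from
 * RFC 2203: flavor and length, then { version, gss procedure, sequence
 * number, service, wire context handle }. The verifier that follows
 * carries a MIC computed over the RPC header and credential just written.
 */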
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *cred = req->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *cred_len;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor */
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}

static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
						 struct gss_cred,
						 gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.uid = oldcred->cr_uid,
		.principal = gss_cred->gc_principal,
		.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);
	task->tk_rqstp->rq_cred = new;
	put_rpccred(oldcred);
	return 0;
}

static int gss_cred_is_negative_entry(struct rpc_cred *cred)
{
	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
		unsigned long now = jiffies;
		unsigned long begin, expire;
		struct gss_cred *gss_cred;

		gss_cred = container_of(cred, struct gss_cred, gc_base);
		begin = gss_cred->gc_upcall_timestamp;
		expire = begin + gss_expired_cred_retry_delay * HZ;

		if (time_in_range_open(now, begin, expire))
			return 1;
	}
	return 0;
}

/*
 * Refresh credentials. XXX - finish
 */
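/*
 * A credential still within its negative-cache window is refused with
 * -EKEYEXPIRED; one that is neither NEW nor UPTODATE (its context has
 * expired) is first swapped for a fresh lookup via gss_renew_cred(), and
 * a NEW credential then triggers the gssd upcall.
 */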
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	int ret = 0;

	if (gss_cred_is_negative_entry(cred))
		return -EKEYEXPIRED;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
	    !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		cred = task->tk_rqstp->rq_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return 0;
}

static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 seq;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 flav, len;
	u32 maj_stat;
	__be32 *ret = ERR_PTR(-EIO);

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	ret = ERR_PTR(-EACCES);
	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
			task->tk_pid, __func__, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
		task->tk_pid, __func__);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
		PTR_ERR(ret));
	return ret;
}

static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
				__be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
	encode(rqstp, &xdr, obj);
}

static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		   kxdreproc_t encode, struct rpc_rqst *rqstp,
		   __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf integ_buf;
	__be32 *integ_len = NULL;
	struct xdr_netobj mic;
	u32 offset;
	__be32 *q;
	struct kvec *iov;
	u32 maj_stat = 0;
	int status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}

static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i = 0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i = 0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	rqstp->rq_enc_pages_num = i;
	priv_release_snd_buf(rqstp);
out:
	return -EAGAIN;
}
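/*
 * Privacy (RPC_GSS_SVC_PRIVACY) wrapping: the arguments are encoded after
 * an opaque-length placeholder and the sequence number, bounce pages are
 * substituted so gss_wrap() has room to grow the message, and the result
 * is length-prefixed and padded out to a 4-byte boundary.
 */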
static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		  kxdreproc_t encode, struct rpc_rqst *rqstp,
		  __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	u32 offset;
	u32 maj_stat;
	int status;
	__be32 *opaque_len;
	struct page **inpages;
	int first;
	int pad;
	struct kvec *iov;
	char *tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/*
	 * Give the tail its own page, in case we need extra space in the
	 * head when wrapping:
	 *
	 * call_allocate() adds twice the slack space required
	 * by the authentication flavor to rq_callsize.
	 * For GSS, slack is GSS_CRED_SLACK.
	 */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* slack space should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}

static int
gss_wrap_req(struct rpc_task *task,
	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int status = -EIO;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		goto out;
	}
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
		break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
	return status;
}

static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		      struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		     struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}

static int
gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
		      __be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	return decode(rqstp, &xdr, obj);
}

static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *savedp = p;
	struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int savedlen = head->iov_len;
	int status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n",
		task->tk_pid, __func__, status);
	return status;
}

static const struct rpc_authops authgss_ops = {
	.owner		= THIS_MODULE,
	.au_flavor	= RPC_AUTH_GSS,
	.au_name	= "RPCSEC_GSS",
	.create		= gss_create,
	.destroy	= gss_destroy,
	.lookup_cred	= gss_lookup_cred,
	.crcreate	= gss_create_cred,
	.list_pseudoflavors = gss_mech_list_pseudoflavors,
	.info2flavor	= gss_mech_info2flavor,
	.flavor2info	= gss_mech_flavor2info,
};

static const struct rpc_credops gss_credops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_cred,
	.cr_init	= gss_cred_init,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
	.crkey_timeout	= gss_key_timeout,
};

static const struct rpc_credops gss_nullops = {
	.cr_name	= "AUTH_GSS",
	.crdestroy	= gss_destroy_nullcred,
	.crbind		= rpcauth_generic_bind_cred,
	.crmatch	= gss_match,
	.crmarshal	= gss_marshal,
	.crrefresh	= gss_refresh_null,
	.crvalidate	= gss_validate,
	.crwrap_req	= gss_wrap_req,
	.crunwrap_resp	= gss_unwrap_resp,
};

static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v0,
	.release_pipe	= gss_pipe_release,
};

static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= gss_pipe_downcall,
	.destroy_msg	= gss_pipe_destroy_msg,
	.open_pipe	= gss_pipe_open_v1,
	.release_pipe	= gss_pipe_release,
};

static __net_init int rpcsec_gss_init_net(struct net *net)
{
	return gss_svc_init_net(net);
}

static __net_exit void rpcsec_gss_exit_net(struct net *net)
{
	gss_svc_shutdown_net(net);
}

static struct pernet_operations rpcsec_gss_net_ops = {
	.init = rpcsec_gss_init_net,
	.exit = rpcsec_gss_exit_net,
};

/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	err = register_pernet_subsys(&rpcsec_gss_net_ops);
	if (err)
		goto out_svc_exit;
	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
	return 0;
out_svc_exit:
	gss_svc_shutdown();
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	unregister_pernet_subsys(&rpcsec_gss_net_ops);
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
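/* RPC_AUTH_GSS is RPC auth flavor 6; the alias allows the module to be
 * loaded on demand by flavor number. */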
MODULE_ALIAS("rpc-auth-6");
MODULE_LICENSE("GPL");
module_param_named(expired_cred_retry_delay,
		   gss_expired_cred_retry_delay,
		   uint, 0644);
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
		"the RPC engine retries an expired credential");

module_param_named(key_expire_timeo,
		   gss_key_expire_timeo,
		   uint, 0644);
MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
		"credential key's lifetime during which the NFS layer cleans "
		"up prior to key expiration");

module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)