/*
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Dug Song <dugsong@monkey.org>
 * Andy Adamson <andros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/gss_err.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/sunrpc/gss_api.h>
#include <asm/uaccess.h>
#include <linux/hashtable.h>

#include "../netns.h"

static const struct rpc_authops authgss_ops;

static const struct rpc_credops gss_credops;
static const struct rpc_credops gss_nullops;

#define GSS_RETRY_EXPIRED 5
static unsigned int gss_expired_cred_retry_delay = GSS_RETRY_EXPIRED;

#define GSS_KEY_EXPIRE_TIMEO 240
static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

#define GSS_CRED_SLACK		(RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
 * using integrity (two 4-byte integers): */
#define GSS_VERF_SLACK		100

static DEFINE_HASHTABLE(gss_auth_hash_table, 4);
static DEFINE_SPINLOCK(gss_auth_hash_lock);

struct gss_pipe {
	struct rpc_pipe_dir_object pdo;
	struct rpc_pipe *pipe;
	struct rpc_clnt *clnt;
	const char *name;
	struct kref kref;
};

struct gss_auth {
	struct kref kref;
	struct hlist_node hash;
	struct rpc_auth rpc_auth;
	struct gss_api_mech *mech;
	enum rpc_gss_svc service;
	struct rpc_clnt *client;
	struct net *net;
	/*
	 * There are two upcall pipes; dentry[1], named "gssd", is used
	 * for the new text-based upcall; dentry[0] is named after the
	 * mechanism (for example, "krb5") and exists for
	 * backwards-compatibility with older gssd's.
	 */
	struct gss_pipe *gss_pipe[2];
	const char *target_name;
};

/* pipe_version >= 0 if and only if someone has a pipe open. */
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);

static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
static const struct rpc_pipe_ops gss_upcall_ops_v1;

static inline struct gss_cl_ctx *
gss_get_ctx(struct gss_cl_ctx *ctx)
{
	atomic_inc(&ctx->count);
	return ctx;
}

static inline void
gss_put_ctx(struct gss_cl_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->count))
		gss_free_ctx(ctx);
}

/* gss_cred_set_ctx:
 * called by gss_upcall_callback and gss_create_upcall in order
 * to set the gss context. The actual exchange of an old context
 * and a new one is protected by the pipe->lock.
 */
static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		return;
	gss_get_ctx(ctx);
	rcu_assign_pointer(gss_cred->gc_ctx, ctx);
	set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	smp_mb__before_clear_bit();
	clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
}

static const void *
simple_get_bytes(const void *p, const void *end, void *res, size_t len)
{
	const void *q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	memcpy(res, p, len);
	return q;
}

static inline const void *
simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest)
{
	const void *q;
	unsigned int len;

	p = simple_get_bytes(p, end, &len, sizeof(len));
	if (IS_ERR(p))
		return p;
	q = (const void *)((const char *)p + len);
	if (unlikely(q > end || q < p))
		return ERR_PTR(-EFAULT);
	dest->data = kmemdup(p, len, GFP_NOFS);
	if (unlikely(dest->data == NULL))
		return ERR_PTR(-ENOMEM);
	dest->len = len;
	return q;
}

static struct gss_cl_ctx *
gss_cred_get_ctx(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_cl_ctx *ctx = NULL;

	rcu_read_lock();
	if (gss_cred->gc_ctx)
		ctx = gss_get_ctx(gss_cred->gc_ctx);
	rcu_read_unlock();
	return ctx;
}

static struct gss_cl_ctx *
gss_alloc_context(void)
{
	struct gss_cl_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_NOFS);
	if (ctx != NULL) {
		ctx->gc_proc = RPC_GSS_PROC_DATA;
		ctx->gc_seq = 1;	/* NetApp 6.4R1 doesn't accept seq. no. 0 */
		spin_lock_init(&ctx->gc_seq_lock);
		atomic_set(&ctx->count, 1);
	}
	return ctx;
}

#define GSSD_MIN_TIMEOUT (60 * 60)
static const void *
gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct gss_api_mech *gm)
{
	const void *q;
	unsigned int seclen;
	unsigned int timeout;
	unsigned long now = jiffies;
	u32 window_size;
	int ret;

	/* First unsigned int gives the remaining lifetime in seconds of the
	 * credential - e.g. the remaining TGT lifetime for Kerberos or
	 * the -t value passed to GSSD.
	 */
	p = simple_get_bytes(p, end, &timeout, sizeof(timeout));
	if (IS_ERR(p))
		goto err;
	if (timeout == 0)
		timeout = GSSD_MIN_TIMEOUT;
	ctx->gc_expiry = now + ((unsigned long)timeout * HZ);
	/* Sequence number window. Determines the maximum number of
	 * simultaneous requests
	 */
	p = simple_get_bytes(p, end, &window_size, sizeof(window_size));
	if (IS_ERR(p))
		goto err;
	ctx->gc_win = window_size;
	/* gssd signals an error by passing ctx->gc_win = 0: */
	if (ctx->gc_win == 0) {
		/*
		 * in which case, p points to an error code. Anything other
		 * than -EKEYEXPIRED gets converted to -EACCES.
		 */
		p = simple_get_bytes(p, end, &ret, sizeof(ret));
		if (!IS_ERR(p))
			p = (ret == -EKEYEXPIRED) ? ERR_PTR(-EKEYEXPIRED) :
						    ERR_PTR(-EACCES);
		goto err;
	}
	/* copy the opaque wire context */
	p = simple_get_netobj(p, end, &ctx->gc_wire_ctx);
	if (IS_ERR(p))
		goto err;
	/* import the opaque security context */
	p = simple_get_bytes(p, end, &seclen, sizeof(seclen));
	if (IS_ERR(p))
		goto err;
	q = (const void *)((const char *)p + seclen);
	if (unlikely(q > end || q < p)) {
		p = ERR_PTR(-EFAULT);
		goto err;
	}
	ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS);
	if (ret < 0) {
		p = ERR_PTR(ret);
		goto err;
	}
	dprintk("RPC: %s Success. gc_expiry %lu now %lu timeout %u\n",
		__func__, ctx->gc_expiry, now, timeout);
	return q;
err:
	dprintk("RPC: %s returns error %ld\n", __func__, -PTR_ERR(p));
	return p;
}

#define UPCALL_BUF_LEN 128

struct gss_upcall_msg {
	atomic_t count;
	kuid_t uid;
	struct rpc_pipe_msg msg;
	struct list_head list;
	struct gss_auth *auth;
	struct rpc_pipe *pipe;
	struct rpc_wait_queue rpc_waitqueue;
	wait_queue_head_t waitqueue;
	struct gss_cl_ctx *ctx;
	char databuf[UPCALL_BUF_LEN];
};

static int get_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version >= 0) {
		atomic_inc(&sn->pipe_users);
		ret = sn->pipe_version;
	} else
		ret = -EAGAIN;
	spin_unlock(&pipe_version_lock);
	return ret;
}

static void put_pipe_version(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
		sn->pipe_version = -1;
		spin_unlock(&pipe_version_lock);
	}
}

static void
gss_release_msg(struct gss_upcall_msg *gss_msg)
{
	struct net *net = gss_msg->auth->net;
	if (!atomic_dec_and_test(&gss_msg->count))
		return;
	put_pipe_version(net);
	BUG_ON(!list_empty(&gss_msg->list));
	if (gss_msg->ctx != NULL)
		gss_put_ctx(gss_msg->ctx);
	rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
	kfree(gss_msg);
}

static struct gss_upcall_msg *
__gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid)
{
	struct gss_upcall_msg *pos;
	list_for_each_entry(pos, &pipe->in_downcall, list) {
		if (!uid_eq(pos->uid, uid))
			continue;
		atomic_inc(&pos->count);
		dprintk("RPC: %s found msg %p\n", __func__, pos);
		return pos;
	}
	dprintk("RPC: %s found nothing\n", __func__);
	return NULL;
}

/* Try to add an upcall to the pipefs queue.
 * If an upcall owned by our uid already exists, then we return a reference
 * to that upcall instead of adding the new upcall.
 */
static inline struct gss_upcall_msg *
gss_add_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;
	struct gss_upcall_msg *old;

	spin_lock(&pipe->lock);
	old = __gss_find_upcall(pipe, gss_msg->uid);
	if (old == NULL) {
		atomic_inc(&gss_msg->count);
		list_add(&gss_msg->list, &pipe->in_downcall);
	} else
		gss_msg = old;
	spin_unlock(&pipe->lock);
	return gss_msg;
}

static void
__gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	list_del_init(&gss_msg->list);
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
	wake_up_all(&gss_msg->waitqueue);
	atomic_dec(&gss_msg->count);
}

static void
gss_unhash_msg(struct gss_upcall_msg *gss_msg)
{
	struct rpc_pipe *pipe = gss_msg->pipe;

	if (list_empty(&gss_msg->list))
		return;
	spin_lock(&pipe->lock);
	if (!list_empty(&gss_msg->list))
		__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
}

static void
gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss_msg)
{
	switch (gss_msg->msg.errno) {
	case 0:
		if (gss_msg->ctx == NULL)
			break;
		clear_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
		gss_cred_set_ctx(&gss_cred->gc_base, gss_msg->ctx);
		break;
	case -EKEYEXPIRED:
		set_bit(RPCAUTH_CRED_NEGATIVE, &gss_cred->gc_base.cr_flags);
	}
	gss_cred->gc_upcall_timestamp = jiffies;
	gss_cred->gc_upcall = NULL;
	rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
}

static void
gss_upcall_callback(struct rpc_task *task)
{
	struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
	struct rpc_pipe *pipe = gss_msg->pipe;

	spin_lock(&pipe->lock);
	gss_handle_downcall_result(gss_cred, gss_msg);
	spin_unlock(&pipe->lock);
	task->tk_status = gss_msg->msg.errno;
	gss_release_msg(gss_msg);
}

static void gss_encode_v0_msg(struct gss_upcall_msg *gss_msg)
{
	uid_t uid = from_kuid(&init_user_ns, gss_msg->uid);
	memcpy(gss_msg->databuf, &uid, sizeof(uid));
	gss_msg->msg.data = gss_msg->databuf;
	gss_msg->msg.len = sizeof(uid);
	BUG_ON(sizeof(uid) > UPCALL_BUF_LEN);
}

static void gss_encode_v1_msg(struct gss_upcall_msg *gss_msg,
				const char *service_name,
				const char *target_name)
{
	struct gss_api_mech *mech = gss_msg->auth->mech;
	char *p = gss_msg->databuf;
	int len = 0;

	gss_msg->msg.len = sprintf(gss_msg->databuf, "mech=%s uid=%d ",
				   mech->gm_name,
				   from_kuid(&init_user_ns, gss_msg->uid));
	p += gss_msg->msg.len;
	if (target_name) {
		len = sprintf(p, "target=%s ", target_name);
		p += len;
		gss_msg->msg.len += len;
	}
	if (service_name != NULL) {
		len = sprintf(p, "service=%s ", service_name);
		p += len;
		gss_msg->msg.len += len;
	}
	if (mech->gm_upcall_enctypes) {
		len = sprintf(p, "enctypes=%s ", mech->gm_upcall_enctypes);
		p += len;
		gss_msg->msg.len += len;
	}
	len = sprintf(p, "\n");
	gss_msg->msg.len += len;

	gss_msg->msg.data = gss_msg->databuf;
	BUG_ON(gss_msg->msg.len > UPCALL_BUF_LEN);
}

static struct gss_upcall_msg *
gss_alloc_msg(struct gss_auth *gss_auth,
		kuid_t uid, const char *service_name)
{
	struct gss_upcall_msg *gss_msg;
	int vers;

	gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
	if (gss_msg == NULL)
		return ERR_PTR(-ENOMEM);
	vers = get_pipe_version(gss_auth->net);
	if (vers < 0) {
		kfree(gss_msg);
		return ERR_PTR(vers);
	}
	gss_msg->pipe = gss_auth->gss_pipe[vers]->pipe;
	INIT_LIST_HEAD(&gss_msg->list);
	rpc_init_wait_queue(&gss_msg->rpc_waitqueue, "RPCSEC_GSS upcall waitq");
	init_waitqueue_head(&gss_msg->waitqueue);
	atomic_set(&gss_msg->count, 1);
	gss_msg->uid = uid;
	gss_msg->auth = gss_auth;
	switch (vers) {
	case 0:
		gss_encode_v0_msg(gss_msg);
	default:
		gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
	};
	return gss_msg;
}

static struct gss_upcall_msg *
gss_setup_upcall(struct gss_auth *gss_auth, struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_new, *gss_msg;
	kuid_t uid = cred->cr_uid;

	gss_new = gss_alloc_msg(gss_auth, uid, gss_cred->gc_principal);
	if (IS_ERR(gss_new))
		return gss_new;
	gss_msg = gss_add_msg(gss_new);
	if (gss_msg == gss_new) {
		int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
		if (res) {
			gss_unhash_msg(gss_new);
			gss_msg = ERR_PTR(res);
		}
	} else
		gss_release_msg(gss_new);
	return gss_msg;
}

static void warn_gssd(void)
{
	static unsigned long ratelimit;
	unsigned long now = jiffies;

	if (time_after(now, ratelimit)) {
		printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
				"Please check user daemon is running.\n");
		ratelimit = now + 15*HZ;
	}
}

static inline int
gss_refresh_upcall(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_auth *gss_auth = container_of(cred->cr_auth,
			struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred,
			struct gss_cred, gc_base);
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe;
	int err = 0;

	dprintk("RPC: %5u %s for uid %u\n",
		task->tk_pid, __func__, from_kuid(&init_user_ns, cred->cr_uid));
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		/* XXX: warning on the first, under the assumption we
		 * shouldn't normally hit this case on a refresh.
		 */
		warn_gssd();
		task->tk_timeout = 15*HZ;
		rpc_sleep_on(&pipe_version_rpc_waitqueue, task, NULL);
		return -EAGAIN;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	spin_lock(&pipe->lock);
	if (gss_cred->gc_upcall != NULL)
		rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
	else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
		task->tk_timeout = 0;
		gss_cred->gc_upcall = gss_msg;
		/* gss_upcall_callback will release the reference to gss_upcall_msg */
		atomic_inc(&gss_msg->count);
		rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
	} else {
		gss_handle_downcall_result(gss_cred, gss_msg);
		err = gss_msg->msg.errno;
	}
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %5u %s for uid %u result %d\n",
		task->tk_pid, __func__,
		from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

static inline int
gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
{
	struct net *net = gss_auth->net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_pipe *pipe;
	struct rpc_cred *cred = &gss_cred->gc_base;
	struct gss_upcall_msg *gss_msg;
	unsigned long timeout;
	DEFINE_WAIT(wait);
	int err;

	dprintk("RPC: %s for uid %u\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid));
retry:
	err = 0;
	/* Default timeout is 15s unless we know that gssd is not running */
	timeout = 15 * HZ;
	if (!sn->gssd_running)
		timeout = HZ >> 2;
	gss_msg = gss_setup_upcall(gss_auth, cred);
	if (PTR_ERR(gss_msg) == -EAGAIN) {
		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
				sn->pipe_version >= 0, timeout);
		if (sn->pipe_version < 0) {
			if (err == 0)
				sn->gssd_running = 0;
			warn_gssd();
			err = -EACCES;
		}
		if (err < 0)
			goto out;
		goto retry;
	}
	if (IS_ERR(gss_msg)) {
		err = PTR_ERR(gss_msg);
		goto out;
	}
	pipe = gss_msg->pipe;
	for (;;) {
		prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
		spin_lock(&pipe->lock);
		if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
			break;
		}
		spin_unlock(&pipe->lock);
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out_intr;
		}
		schedule();
	}
	if (gss_msg->ctx)
		gss_cred_set_ctx(cred, gss_msg->ctx);
	else
		err = gss_msg->msg.errno;
	spin_unlock(&pipe->lock);
out_intr:
	finish_wait(&gss_msg->waitqueue, &wait);
	gss_release_msg(gss_msg);
out:
	dprintk("RPC: %s for uid %u result %d\n",
		__func__, from_kuid(&init_user_ns, cred->cr_uid), err);
	return err;
}

#define MSG_BUF_MAXSIZE 1024

static ssize_t
gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	const void *p, *end;
	void *buf;
	struct gss_upcall_msg *gss_msg;
	struct rpc_pipe *pipe = RPC_I(file_inode(filp))->pipe;
	struct gss_cl_ctx *ctx;
	uid_t id;
	kuid_t uid;
	ssize_t err = -EFBIG;

	if (mlen > MSG_BUF_MAXSIZE)
		goto out;
	err = -ENOMEM;
	buf = kmalloc(mlen, GFP_NOFS);
	if (!buf)
		goto out;

	err = -EFAULT;
	if (copy_from_user(buf, src, mlen))
		goto err;

	end = (const void *)((char *)buf + mlen);
	p = simple_get_bytes(buf, end, &id, sizeof(id));
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		goto err;
	}

	uid = make_kuid(&init_user_ns, id);
	if (!uid_valid(uid)) {
		err = -EINVAL;
		goto err;
	}

	err = -ENOMEM;
	ctx = gss_alloc_context();
	if (ctx == NULL)
		goto err;

	err = -ENOENT;
	/* Find a matching upcall */
	spin_lock(&pipe->lock);
	gss_msg = __gss_find_upcall(pipe, uid);
	if (gss_msg == NULL) {
		spin_unlock(&pipe->lock);
		goto err_put_ctx;
	}
	list_del_init(&gss_msg->list);
	spin_unlock(&pipe->lock);

	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
	if (IS_ERR(p)) {
		err = PTR_ERR(p);
		switch (err) {
		case -EACCES:
		case -EKEYEXPIRED:
			gss_msg->msg.errno = err;
			err = mlen;
			break;
		case -EFAULT:
		case -ENOMEM:
		case -EINVAL:
		case -ENOSYS:
			gss_msg->msg.errno = -EAGAIN;
			break;
		default:
			printk(KERN_CRIT "%s: bad return from "
				"gss_fill_context: %zd\n", __func__, err);
			BUG();
		}
		goto err_release_msg;
	}
	gss_msg->ctx = gss_get_ctx(ctx);
	err = mlen;

err_release_msg:
	spin_lock(&pipe->lock);
	__gss_unhash_msg(gss_msg);
	spin_unlock(&pipe->lock);
	gss_release_msg(gss_msg);
err_put_ctx:
	gss_put_ctx(ctx);
err:
	kfree(buf);
out:
	dprintk("RPC: %s returning %Zd\n", __func__, err);
	return err;
}

static int gss_pipe_open(struct inode *inode, int new_version)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	int ret = 0;

	spin_lock(&pipe_version_lock);
	if (sn->pipe_version < 0) {
		/* First open of any gss pipe determines the version: */
		sn->pipe_version = new_version;
		rpc_wake_up(&pipe_version_rpc_waitqueue);
		wake_up(&pipe_version_waitqueue);
	} else if (sn->pipe_version != new_version) {
		/* Trying to open a pipe of a different version */
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&sn->pipe_users);
out:
	spin_unlock(&pipe_version_lock);
	return ret;

}

static int gss_pipe_open_v0(struct inode *inode)
{
	return gss_pipe_open(inode, 0);
}

static int gss_pipe_open_v1(struct inode *inode)
{
	return gss_pipe_open(inode, 1);
}

static void
gss_pipe_release(struct inode *inode)
{
	struct net *net = inode->i_sb->s_fs_info;
	struct rpc_pipe *pipe = RPC_I(inode)->pipe;
	struct gss_upcall_msg *gss_msg;

restart:
	spin_lock(&pipe->lock);
	list_for_each_entry(gss_msg, &pipe->in_downcall, list) {

		if (!list_empty(&gss_msg->msg.list))
			continue;
		gss_msg->msg.errno = -EPIPE;
		atomic_inc(&gss_msg->count);
		__gss_unhash_msg(gss_msg);
		spin_unlock(&pipe->lock);
		gss_release_msg(gss_msg);
		goto restart;
	}
	spin_unlock(&pipe->lock);

	put_pipe_version(net);
}

static void
gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct gss_upcall_msg *gss_msg = container_of(msg, struct gss_upcall_msg, msg);

	if (msg->errno < 0) {
		dprintk("RPC: %s releasing msg %p\n",
			__func__, gss_msg);
		atomic_inc(&gss_msg->count);
		gss_unhash_msg(gss_msg);
		if (msg->errno == -ETIMEDOUT)
			warn_gssd();
		gss_release_msg(gss_msg);
	}
}

static void gss_pipe_dentry_destroy(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *gss_pipe = pdo->pdo_data;
	struct rpc_pipe *pipe = gss_pipe->pipe;

	if (pipe->dentry != NULL) {
		rpc_unlink(pipe->dentry);
		pipe->dentry = NULL;
	}
}

static int gss_pipe_dentry_create(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct gss_pipe *p = pdo->pdo_data;
	struct dentry *dentry;

	dentry = rpc_mkpipe_dentry(dir, p->name, p->clnt, p->pipe);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	p->pipe->dentry = dentry;
	return 0;
}

static const struct rpc_pipe_dir_object_ops gss_pipe_dir_object_ops = {
	.create = gss_pipe_dentry_create,
	.destroy = gss_pipe_dentry_destroy,
};

static struct gss_pipe *gss_pipe_alloc(struct rpc_clnt *clnt,
		const char *name,
		const struct rpc_pipe_ops *upcall_ops)
{
	struct gss_pipe *p;
	int err = -ENOMEM;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		goto err;
	p->pipe = rpc_mkpipe_data(upcall_ops, RPC_PIPE_WAIT_FOR_OPEN);
	if (IS_ERR(p->pipe)) {
		err = PTR_ERR(p->pipe);
		goto err_free_gss_pipe;
	}
	p->name = name;
	p->clnt = clnt;
	kref_init(&p->kref);
	rpc_init_pipe_dir_object(&p->pdo,
			&gss_pipe_dir_object_ops,
			p);
	return p;
err_free_gss_pipe:
	kfree(p);
err:
	return ERR_PTR(err);
}

struct gss_alloc_pdo {
	struct rpc_clnt *clnt;
	const char *name;
	const struct rpc_pipe_ops *upcall_ops;
};

static int gss_pipe_match_pdo(struct rpc_pipe_dir_object *pdo, void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	if (pdo->pdo_ops != &gss_pipe_dir_object_ops)
		return 0;
	gss_pipe = container_of(pdo, struct gss_pipe, pdo);
	if (strcmp(gss_pipe->name, args->name) != 0)
		return 0;
	if (!kref_get_unless_zero(&gss_pipe->kref))
		return 0;
	return 1;
}

static struct rpc_pipe_dir_object *gss_pipe_alloc_pdo(void *data)
{
	struct gss_pipe *gss_pipe;
	struct gss_alloc_pdo *args = data;

	gss_pipe = gss_pipe_alloc(args->clnt, args->name, args->upcall_ops);
	if (!IS_ERR(gss_pipe))
		return &gss_pipe->pdo;
	return NULL;
}

static struct gss_pipe *gss_pipe_get(struct rpc_clnt *clnt,
		const char *name,
		const struct rpc_pipe_ops *upcall_ops)
{
	struct net *net = rpc_net_ns(clnt);
	struct rpc_pipe_dir_object *pdo;
	struct gss_alloc_pdo args = {
		.clnt = clnt,
		.name = name,
		.upcall_ops = upcall_ops,
	};

	pdo = rpc_find_or_alloc_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			gss_pipe_match_pdo,
			gss_pipe_alloc_pdo,
			&args);
	if (pdo != NULL)
		return container_of(pdo, struct gss_pipe, pdo);
	return ERR_PTR(-ENOMEM);
}

static void __gss_pipe_free(struct gss_pipe *p)
{
	struct rpc_clnt *clnt = p->clnt;
	struct net *net = rpc_net_ns(clnt);

	rpc_remove_pipe_dir_object(net,
			&clnt->cl_pipedir_objects,
			&p->pdo);
	rpc_destroy_pipe_data(p->pipe);
	kfree(p);
}

static void __gss_pipe_release(struct kref *kref)
{
	struct gss_pipe *p = container_of(kref, struct gss_pipe, kref);

	__gss_pipe_free(p);
}

static void gss_pipe_free(struct gss_pipe *p)
{
	if (p != NULL)
		kref_put(&p->kref, __gss_pipe_release);
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct gss_auth *
gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	rpc_authflavor_t flavor = args->pseudoflavor;
	struct gss_auth *gss_auth;
	struct gss_pipe *gss_pipe;
	struct rpc_auth *auth;
	int err = -ENOMEM; /* XXX? */

	dprintk("RPC: creating GSS authenticator for client %p\n", clnt);

	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(err);
	if (!(gss_auth = kmalloc(sizeof(*gss_auth), GFP_KERNEL)))
		goto out_dec;
	INIT_HLIST_NODE(&gss_auth->hash);
	gss_auth->target_name = NULL;
	if (args->target_name) {
		gss_auth->target_name = kstrdup(args->target_name, GFP_KERNEL);
		if (gss_auth->target_name == NULL)
			goto err_free;
	}
	gss_auth->client = clnt;
	gss_auth->net = get_net(rpc_net_ns(clnt));
	err = -EINVAL;
	gss_auth->mech = gss_mech_get_by_pseudoflavor(flavor);
	if (!gss_auth->mech) {
		dprintk("RPC: Pseudoflavor %d not found!\n", flavor);
		goto err_put_net;
	}
	gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
	if (gss_auth->service == 0)
		goto err_put_mech;
	auth = &gss_auth->rpc_auth;
	auth->au_cslack = GSS_CRED_SLACK >> 2;
	auth->au_rslack = GSS_VERF_SLACK >> 2;
	auth->au_ops = &authgss_ops;
	auth->au_flavor = flavor;
	atomic_set(&auth->au_count, 1);
	kref_init(&gss_auth->kref);

	err = rpcauth_init_credcache(auth);
	if (err)
		goto err_put_mech;
	/*
	 * Note: if we created the old pipe first, then someone who
	 * examined the directory at the right moment might conclude
	 * that we supported only the old pipe. So we instead create
	 * the new pipe first.
	 */
	gss_pipe = gss_pipe_get(clnt, "gssd", &gss_upcall_ops_v1);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_credcache;
	}
	gss_auth->gss_pipe[1] = gss_pipe;

	gss_pipe = gss_pipe_get(clnt, gss_auth->mech->gm_name,
			&gss_upcall_ops_v0);
	if (IS_ERR(gss_pipe)) {
		err = PTR_ERR(gss_pipe);
		goto err_destroy_pipe_1;
	}
	gss_auth->gss_pipe[0] = gss_pipe;

	return gss_auth;
err_destroy_pipe_1:
	gss_pipe_free(gss_auth->gss_pipe[1]);
err_destroy_credcache:
	rpcauth_destroy_credcache(auth);
err_put_mech:
	gss_mech_put(gss_auth->mech);
err_put_net:
	put_net(gss_auth->net);
err_free:
	kfree(gss_auth->target_name);
	kfree(gss_auth);
out_dec:
	module_put(THIS_MODULE);
	return ERR_PTR(err);
}

static void
gss_free(struct gss_auth *gss_auth)
{
	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_mech_put(gss_auth->mech);
	put_net(gss_auth->net);
	kfree(gss_auth->target_name);

	kfree(gss_auth);
	module_put(THIS_MODULE);
}

static void
gss_free_callback(struct kref *kref)
{
	struct gss_auth *gss_auth = container_of(kref, struct gss_auth, kref);

	gss_free(gss_auth);
}

static void
gss_destroy(struct rpc_auth *auth)
{
	struct gss_auth *gss_auth = container_of(auth,
			struct gss_auth, rpc_auth);

	dprintk("RPC: destroying GSS authenticator %p flavor %d\n",
			auth, auth->au_flavor);

	if (hash_hashed(&gss_auth->hash)) {
		spin_lock(&gss_auth_hash_lock);
		hash_del(&gss_auth->hash);
		spin_unlock(&gss_auth_hash_lock);
	}

	gss_pipe_free(gss_auth->gss_pipe[0]);
	gss_auth->gss_pipe[0] = NULL;
	gss_pipe_free(gss_auth->gss_pipe[1]);
	gss_auth->gss_pipe[1] = NULL;
	rpcauth_destroy_credcache(auth);

	kref_put(&gss_auth->kref, gss_free_callback);
}

/*
 * Auths may be shared between rpc clients that were cloned from a
 * common client with the same xprt, if they also share the flavor and
 * target_name.
 *
 * The auth is looked up from the oldest parent sharing the same
 * cl_xprt, and the auth itself references only that common parent
 * (which is guaranteed to last as long as any of its descendants).
 */
static struct gss_auth *
gss_auth_find_or_add_hashed(struct rpc_auth_create_args *args,
		struct rpc_clnt *clnt,
		struct gss_auth *new)
{
	struct gss_auth *gss_auth;
	unsigned long hashval = (unsigned long)clnt;

	spin_lock(&gss_auth_hash_lock);
	hash_for_each_possible(gss_auth_hash_table,
			gss_auth,
			hash,
			hashval) {
		if (gss_auth->client != clnt)
			continue;
		if (gss_auth->rpc_auth.au_flavor != args->pseudoflavor)
			continue;
		if (gss_auth->target_name != args->target_name) {
			if (gss_auth->target_name == NULL)
				continue;
			if (args->target_name == NULL)
				continue;
			if (strcmp(gss_auth->target_name, args->target_name))
				continue;
		}
		if (!atomic_inc_not_zero(&gss_auth->rpc_auth.au_count))
			continue;
		goto out;
	}
	if (new)
		hash_add(gss_auth_hash_table, &new->hash, hashval);
	gss_auth = new;
out:
	spin_unlock(&gss_auth_hash_lock);
	return gss_auth;
}

static struct gss_auth *
gss_create_hashed(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct gss_auth *new;

	gss_auth = gss_auth_find_or_add_hashed(args, clnt, NULL);
	if (gss_auth != NULL)
		goto out;
	new = gss_create_new(args, clnt);
	if (IS_ERR(new))
		return new;
	gss_auth = gss_auth_find_or_add_hashed(args, clnt, new);
	if (gss_auth != new)
		gss_destroy(&new->rpc_auth);
out:
	return gss_auth;
}

static struct rpc_auth *
gss_create(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
	struct gss_auth *gss_auth;
	struct rpc_xprt *xprt = rcu_access_pointer(clnt->cl_xprt);

	while (clnt != clnt->cl_parent) {
		struct rpc_clnt *parent = clnt->cl_parent;
		/* Find the original parent for this transport */
		if (rcu_access_pointer(parent->cl_xprt) != xprt)
			break;
		clnt = parent;
	}

	gss_auth = gss_create_hashed(args, clnt);
	if (IS_ERR(gss_auth))
		return ERR_CAST(gss_auth);
	return &gss_auth->rpc_auth;
}

/*
 * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
 * to the server with the GSS control procedure field set to
 * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
 * all RPCSEC_GSS state associated with that context.
 */
static int
gss_destroying_context(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct rpc_task *task;

	if (gss_cred->gc_ctx == NULL ||
	    test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
		return 0;

	gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
	cred->cr_ops = &gss_nullops;

	/* Take a reference to ensure the cred will be destroyed either
	 * by the RPC call or by the put_rpccred() below */
	get_rpccred(cred);

	task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
	if (!IS_ERR(task))
		rpc_put_task(task);

	put_rpccred(cred);
	return 1;
}

/* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
 * to create a new cred or context, so they check that things have been
 * allocated before freeing them. */
static void
gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
	dprintk("RPC: %s\n", __func__);

	gss_delete_sec_context(&ctx->gc_gss_ctx);
	kfree(ctx->gc_wire_ctx.data);
	kfree(ctx);
}

static void
gss_free_ctx_callback(struct rcu_head *head)
{
	struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
	gss_do_free_ctx(ctx);
}

static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
	call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
}

static void
gss_free_cred(struct gss_cred *gss_cred)
{
	dprintk("RPC: %s cred=%p\n", __func__, gss_cred);
	kfree(gss_cred);
}

static void
gss_free_cred_callback(struct rcu_head *head)
{
	struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
	gss_free_cred(gss_cred);
}

static void
gss_destroy_nullcred(struct rpc_cred *cred)
{
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
	struct gss_cl_ctx *ctx = gss_cred->gc_ctx;

	RCU_INIT_POINTER(gss_cred->gc_ctx, NULL);
	call_rcu(&cred->cr_rcu, gss_free_cred_callback);
	if (ctx)
		gss_put_ctx(ctx);
	kref_put(&gss_auth->kref, gss_free_callback);
}

static void
gss_destroy_cred(struct rpc_cred *cred)
{

	if (gss_destroying_context(cred))
		return;
	gss_destroy_nullcred(cred);
}

/*
 * Lookup RPCSEC_GSS cred for the current process
 */
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	return rpcauth_lookup_credcache(auth, acred, flags);
}

static struct rpc_cred *
gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *cred = NULL;
	int err = -ENOMEM;

	dprintk("RPC: %s for uid %d, flavor %d\n",
		__func__, from_kuid(&init_user_ns, acred->uid),
		auth->au_flavor);

	if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
		goto out_err;

	rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
	/*
	 * Note: in order to force a call to call_refresh(), we deliberately
	 * fail to flag the credential as RPCAUTH_CRED_UPTODATE.
	 */
	cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
	cred->gc_service = gss_auth->service;
	cred->gc_principal = NULL;
	if (acred->machine_cred)
		cred->gc_principal = acred->principal;
	kref_get(&gss_auth->kref);
	return &cred->gc_base;

out_err:
	dprintk("RPC: %s failed with error %d\n", __func__, err);
	return ERR_PTR(err);
}

static int
gss_cred_init(struct rpc_auth *auth, struct rpc_cred *cred)
{
	struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
	int err;

	do {
		err = gss_create_upcall(gss_auth, gss_cred);
	} while (err == -EAGAIN);
	return err;
}

/*
 * Returns -EACCES if the GSS context is NULL or will expire within the
 * timeout (in seconds)
 */
static int
gss_key_timeout(struct rpc_cred *rc)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	unsigned long now = jiffies;
	unsigned long expire;

	if (gss_cred->gc_ctx == NULL)
		return -EACCES;

	expire = gss_cred->gc_ctx->gc_expiry - (gss_key_expire_timeo * HZ);

	if (time_after(now, expire))
		return -EACCES;
	return 0;
}

static int
gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
	struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
	int ret;

	if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
		goto out;
	/* Don't match with creds that have expired. */
	if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
		return 0;
	if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
		return 0;
out:
	if (acred->principal != NULL) {
		if (gss_cred->gc_principal == NULL)
			return 0;
		ret = strcmp(acred->principal, gss_cred->gc_principal) == 0;
		goto check_expire;
	}
	if (gss_cred->gc_principal != NULL)
		return 0;
	ret = uid_eq(rc->cr_uid, acred->uid);

check_expire:
	if (ret == 0)
		return ret;

	/* Notify acred users of GSS context expiration timeout */
	if (test_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags) &&
	    (gss_key_timeout(rc) != 0)) {
		/* test will now be done from generic cred */
		test_and_clear_bit(RPC_CRED_NOTIFY_TIMEOUT, &acred->ac_flags);
		/* tell NFS layer that key will expire soon */
		set_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
	}
	return ret;
}

/*
 * Marshal credentials.
 * Maybe we should keep a cached credential for performance reasons.
 */
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_cred *cred = req->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
						 gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *cred_len;
	u32 maj_stat = 0;
	struct xdr_netobj mic;
	struct kvec iov;
	struct xdr_buf verf_buf;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	*p++ = htonl(RPC_AUTH_GSS);
	cred_len = p++;

	spin_lock(&ctx->gc_seq_lock);
	req->rq_seqno = ctx->gc_seq++;
	spin_unlock(&ctx->gc_seq_lock);

	*p++ = htonl((u32) RPC_GSS_VERSION);
	*p++ = htonl((u32) ctx->gc_proc);
	*p++ = htonl((u32) req->rq_seqno);
	*p++ = htonl((u32) gss_cred->gc_service);
	p = xdr_encode_netobj(p, &ctx->gc_wire_ctx);
	*cred_len = htonl((p - (cred_len + 1)) << 2);

	/* We compute the checksum for the verifier over the xdr-encoded bytes
	 * starting with the xid and ending at the end of the credential: */
	iov.iov_base = xprt_skip_transport_header(req->rq_xprt,
					req->rq_snd_buf.head[0].iov_base);
	iov.iov_len = (u8 *)p - (u8 *)iov.iov_base;
	xdr_buf_from_iov(&iov, &verf_buf);

	/* set verifier flavor*/
	*p++ = htonl(RPC_AUTH_GSS);

	mic.data = (u8 *)(p + 1);
	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED) {
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	} else if (maj_stat != 0) {
		printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
		goto out_put_ctx;
	}
	p = xdr_encode_opaque(p, NULL, mic.len);
	gss_put_ctx(ctx);
	return p;
out_put_ctx:
	gss_put_ctx(ctx);
	return NULL;
}

static int gss_renew_cred(struct rpc_task *task)
{
	struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(oldcred,
						 struct gss_cred,
						 gc_base);
	struct rpc_auth *auth = oldcred->cr_auth;
	struct auth_cred acred = {
		.uid = oldcred->cr_uid,
		.principal = gss_cred->gc_principal,
		.machine_cred = (gss_cred->gc_principal != NULL ? 1 : 0),
	};
	struct rpc_cred *new;

	new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
	if (IS_ERR(new))
		return PTR_ERR(new);
	task->tk_rqstp->rq_cred = new;
	put_rpccred(oldcred);
	return 0;
}

static int gss_cred_is_negative_entry(struct rpc_cred *cred)
{
	if (test_bit(RPCAUTH_CRED_NEGATIVE, &cred->cr_flags)) {
		unsigned long now = jiffies;
		unsigned long begin, expire;
		struct gss_cred *gss_cred;

		gss_cred = container_of(cred, struct gss_cred, gc_base);
		begin = gss_cred->gc_upcall_timestamp;
		expire = begin + gss_expired_cred_retry_delay * HZ;

		if (time_in_range_open(now, begin, expire))
			return 1;
	}
	return 0;
}

/*
 * Refresh credentials. XXX - finish
 */
static int
gss_refresh(struct rpc_task *task)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	int ret = 0;

	if (gss_cred_is_negative_entry(cred))
		return -EKEYEXPIRED;

	if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
	    !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
		ret = gss_renew_cred(task);
		if (ret < 0)
			goto out;
		cred = task->tk_rqstp->rq_cred;
	}

	if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
		ret = gss_refresh_upcall(task);
out:
	return ret;
}

/* Dummy refresh routine: used only when destroying the context */
static int
gss_refresh_null(struct rpc_task *task)
{
	return -EACCES;
}

static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 seq;
	struct kvec iov;
	struct xdr_buf verf_buf;
	struct xdr_netobj mic;
	u32 flav, len;
	u32 maj_stat;
	__be32 *ret = ERR_PTR(-EIO);

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);

	flav = ntohl(*p++);
	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
		goto out_bad;
	if (flav != RPC_AUTH_GSS)
		goto out_bad;
	seq = htonl(task->tk_rqstp->rq_seqno);
	iov.iov_base = &seq;
	iov.iov_len = sizeof(seq);
	xdr_buf_from_iov(&iov, &verf_buf);
	mic.data = (u8 *)p;
	mic.len = len;

	ret = ERR_PTR(-EACCES);
	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat) {
		dprintk("RPC: %5u %s: gss_verify_mic returned error 0x%08x\n",
			task->tk_pid, __func__, maj_stat);
		goto out_bad;
	}
	/* We leave it to unwrap to calculate au_rslack. For now we just
	 * calculate the length of the verifier: */
	cred->cr_auth->au_verfsize = XDR_QUADLEN(len) + 2;
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s: gss_verify_mic succeeded.\n",
		task->tk_pid, __func__);
	return p + XDR_QUADLEN(len);
out_bad:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s failed ret %ld.\n", task->tk_pid, __func__,
		PTR_ERR(ret));
	return ret;
}

static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
				__be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
	encode(rqstp, &xdr, obj);
}

static inline int
gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		   kxdreproc_t encode, struct rpc_rqst *rqstp,
		   __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	struct xdr_buf integ_buf;
	__be32 *integ_len = NULL;
	struct xdr_netobj mic;
	u32 offset;
	__be32 *q;
	struct kvec *iov;
	u32 maj_stat = 0;
	int status = -EIO;

	integ_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	if (xdr_buf_subsegment(snd_buf, &integ_buf,
				offset, snd_buf->len - offset))
		return status;
	*integ_len = htonl(integ_buf.len);

	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	mic.data = (u8 *)(p + 1);

	maj_stat = gss_get_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	status = -EIO; /* XXX? */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;
	q = xdr_encode_opaque(p, NULL, mic.len);

	offset = (u8 *)q - (u8 *)p;
	iov->iov_len += offset;
	snd_buf->len += offset;
	return 0;
}

static void
priv_release_snd_buf(struct rpc_rqst *rqstp)
{
	int i;

	for (i = 0; i < rqstp->rq_enc_pages_num; i++)
		__free_page(rqstp->rq_enc_pages[i]);
	kfree(rqstp->rq_enc_pages);
}

static int
alloc_enc_pages(struct rpc_rqst *rqstp)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	int first, last, i;

	if (snd_buf->page_len == 0) {
		rqstp->rq_enc_pages_num = 0;
		return 0;
	}

	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
	rqstp->rq_enc_pages_num = last - first + 1 + 1;
	rqstp->rq_enc_pages
		= kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
				GFP_NOFS);
	if (!rqstp->rq_enc_pages)
		goto out;
	for (i = 0; i < rqstp->rq_enc_pages_num; i++) {
		rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
		if (rqstp->rq_enc_pages[i] == NULL)
			goto out_free;
	}
	rqstp->rq_release_snd_buf = priv_release_snd_buf;
	return 0;
out_free:
	rqstp->rq_enc_pages_num = i;
	priv_release_snd_buf(rqstp);
out:
	return -EAGAIN;
}

static inline int
gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		  kxdreproc_t encode, struct rpc_rqst *rqstp,
		  __be32 *p, void *obj)
{
	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
	u32 offset;
	u32 maj_stat;
	int status;
	__be32 *opaque_len;
	struct page **inpages;
	int first;
	int pad;
	struct kvec *iov;
	char *tmp;

	opaque_len = p++;
	offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
	*p++ = htonl(rqstp->rq_seqno);

	gss_wrap_req_encode(encode, rqstp, p, obj);

	status = alloc_enc_pages(rqstp);
	if (status)
		return status;
	first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
	inpages = snd_buf->pages + first;
	snd_buf->pages = rqstp->rq_enc_pages;
	snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
	/*
	 * Give the tail its own page, in case we need extra space in the
	 * head when wrapping:
	 *
	 * call_allocate() allocates twice the slack space required
	 * by the authentication flavor to rq_callsize.
	 * For GSS, slack is GSS_CRED_SLACK.
	 */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
		tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
		memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
		snd_buf->tail[0].iov_base = tmp;
	}
	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
	/* slack space should prevent this ever happening: */
	BUG_ON(snd_buf->len > snd_buf->buflen);
	status = -EIO;
	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
	 * done anyway, so it's safe to put the request on the wire: */
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	else if (maj_stat)
		return status;

	*opaque_len = htonl(snd_buf->len - offset);
	/* guess whether we're in the head or the tail: */
	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
		iov = snd_buf->tail;
	else
		iov = snd_buf->head;
	p = iov->iov_base + iov->iov_len;
	pad = 3 - ((snd_buf->len - offset - 1) & 3);
	memset(p, 0, pad);
	iov->iov_len += pad;
	snd_buf->len += pad;

	return 0;
}

static int
gss_wrap_req(struct rpc_task *task,
	     kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	int status = -EIO;

	dprintk("RPC: %5u %s\n", task->tk_pid, __func__);
	if (ctx->gc_proc != RPC_GSS_PROC_DATA) {
		/* The spec seems a little ambiguous here, but I think that not
		 * wrapping context destruction requests makes the most sense.
		 */
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		goto out;
	}
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		gss_wrap_req_encode(encode, rqstp, p, obj);
		status = 0;
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
		break;
	}
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n", task->tk_pid, __func__, status);
	return status;
}

static inline int
gss_unwrap_resp_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	struct xdr_buf integ_buf;
	struct xdr_netobj mic;
	u32 data_offset, mic_offset;
	u32 integ_len;
	u32 maj_stat;
	int status = -EIO;

	integ_len = ntohl(*(*p)++);
	if (integ_len & 3)
		return status;
	data_offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	mic_offset = integ_len + data_offset;
	if (mic_offset > rcv_buf->len)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
				mic_offset - data_offset))
		return status;

	if (xdr_buf_read_netobj(rcv_buf, &mic, mic_offset))
		return status;

	maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf, &mic);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	return 0;
}

static inline int
gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
		struct rpc_rqst *rqstp, __be32 **p)
{
	struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
	u32 offset;
	u32 opaque_len;
	u32 maj_stat;
	int status = -EIO;

	opaque_len = ntohl(*(*p)++);
	offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
	if (offset + opaque_len > rcv_buf->len)
		return status;
	/* remove padding: */
	rcv_buf->len = offset + opaque_len;

	maj_stat = gss_unwrap(ctx->gc_gss_ctx, offset, rcv_buf);
	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
	if (maj_stat != GSS_S_COMPLETE)
		return status;
	if (ntohl(*(*p)++) != rqstp->rq_seqno)
		return status;

	return 0;
}

static int
gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
		      __be32 *p, void *obj)
{
	struct xdr_stream xdr;

	xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
	return decode(rqstp, &xdr, obj);
}

static int
gss_unwrap_resp(struct rpc_task *task,
		kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
{
	struct rpc_cred *cred = task->tk_rqstp->rq_cred;
	struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
			gc_base);
	struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
	__be32 *savedp = p;
	struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head;
	int savedlen = head->iov_len;
	int status = -EIO;

	if (ctx->gc_proc != RPC_GSS_PROC_DATA)
		goto out_decode;
	switch (gss_cred->gc_service) {
	case RPC_GSS_SVC_NONE:
		break;
	case RPC_GSS_SVC_INTEGRITY:
		status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	case RPC_GSS_SVC_PRIVACY:
		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
		if (status)
			goto out;
		break;
	}
	/* take into account extra slack for integrity and privacy cases: */
	cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
						+ (savedlen - head->iov_len);
out_decode:
	status = gss_unwrap_req_decode(decode, rqstp, p, obj);
out:
	gss_put_ctx(ctx);
	dprintk("RPC: %5u %s returning %d\n",
		task->tk_pid, __func__, status);
	return status;
}

static const struct rpc_authops authgss_ops = {
	.owner = THIS_MODULE,
	.au_flavor = RPC_AUTH_GSS,
	.au_name = "RPCSEC_GSS",
	.create = gss_create,
	.destroy = gss_destroy,
	.lookup_cred = gss_lookup_cred,
	.crcreate = gss_create_cred,
	.list_pseudoflavors = gss_mech_list_pseudoflavors,
	.info2flavor = gss_mech_info2flavor,
	.flavor2info = gss_mech_flavor2info,
};

static const struct rpc_credops gss_credops = {
	.cr_name = "AUTH_GSS",
	.crdestroy = gss_destroy_cred,
	.cr_init = gss_cred_init,
	.crbind = rpcauth_generic_bind_cred,
	.crmatch = gss_match,
	.crmarshal = gss_marshal,
	.crrefresh = gss_refresh,
	.crvalidate = gss_validate,
	.crwrap_req = gss_wrap_req,
	.crunwrap_resp = gss_unwrap_resp,
	.crkey_timeout = gss_key_timeout,
};

static const struct rpc_credops gss_nullops = {
	.cr_name = "AUTH_GSS",
	.crdestroy = gss_destroy_nullcred,
	.crbind = rpcauth_generic_bind_cred,
	.crmatch = gss_match,
	.crmarshal = gss_marshal,
	.crrefresh = gss_refresh_null,
	.crvalidate = gss_validate,
	.crwrap_req = gss_wrap_req,
	.crunwrap_resp = gss_unwrap_resp,
};

static const struct rpc_pipe_ops gss_upcall_ops_v0 = {
	.upcall = rpc_pipe_generic_upcall,
	.downcall = gss_pipe_downcall,
	.destroy_msg = gss_pipe_destroy_msg,
	.open_pipe = gss_pipe_open_v0,
	.release_pipe = gss_pipe_release,
};

static const struct rpc_pipe_ops gss_upcall_ops_v1 = {
	.upcall = rpc_pipe_generic_upcall,
	.downcall = gss_pipe_downcall,
	.destroy_msg = gss_pipe_destroy_msg,
	.open_pipe = gss_pipe_open_v1,
	.release_pipe = gss_pipe_release,
};

static __net_init int rpcsec_gss_init_net(struct net *net)
{
	return gss_svc_init_net(net);
}

static __net_exit void rpcsec_gss_exit_net(struct net *net)
{
	gss_svc_shutdown_net(net);
}

static struct pernet_operations rpcsec_gss_net_ops = {
	.init = rpcsec_gss_init_net,
	.exit = rpcsec_gss_exit_net,
};

/*
 * Initialize RPCSEC_GSS module
 */
static int __init init_rpcsec_gss(void)
{
	int err = 0;

	err = rpcauth_register(&authgss_ops);
	if (err)
		goto out;
	err = gss_svc_init();
	if (err)
		goto out_unregister;
	err = register_pernet_subsys(&rpcsec_gss_net_ops);
	if (err)
		goto out_svc_exit;
	rpc_init_wait_queue(&pipe_version_rpc_waitqueue, "gss pipe version");
	return 0;
out_svc_exit:
	gss_svc_shutdown();
out_unregister:
	rpcauth_unregister(&authgss_ops);
out:
	return err;
}

static void __exit exit_rpcsec_gss(void)
{
	unregister_pernet_subsys(&rpcsec_gss_net_ops);
	gss_svc_shutdown();
	rpcauth_unregister(&authgss_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_ALIAS("rpc-auth-6");
MODULE_LICENSE("GPL");
module_param_named(expired_cred_retry_delay,
		   gss_expired_cred_retry_delay,
		   uint, 0644);
MODULE_PARM_DESC(expired_cred_retry_delay, "Timeout (in seconds) until "
		"the RPC engine retries an expired credential");

module_param_named(key_expire_timeo,
		   gss_key_expire_timeo,
		   uint, 0644);
MODULE_PARM_DESC(key_expire_timeo, "Time (in seconds) at the end of a "
		"credential key's lifetime where the NFS layer cleans up "
		"prior to key expiration");

module_init(init_rpcsec_gss)
module_exit(exit_rpcsec_gss)