/*
 *  linux/fs/lockd/clntproc.c
 *
 *  RPC procedures for the client side NLM implementation
 *
 *  Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

	memcpy(c->data, &cookie, 4);
	c->len=4;
}

static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
	atomic_inc(&lockowner->count);
	return lockowner;
}

static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmclnt_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;
	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;
	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlm_get_lockowner(lockowner);
	}
	return NULL;
}

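/*
 * Find the lockowner for @owner on this host, creating one if it does
 * not exist yet.  The allocation is done with h_lock dropped and the
 * list is re-checked afterwards, so two racing lookups cannot insert
 * duplicate entries for the same owner.
 */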
static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlm_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlm_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			atomic_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args *argp = &req->a_args;
	struct nlm_lock *lock = &argp->lock;

	nlmclnt_next_cookie(&argp->cookie);
	memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh));
	lock->caller = utsname()->nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				utsname()->nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
{
	struct nlm_rqst *call;
	int status;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	nlmclnt_locks_init_private(fl, host);
	if (!fl->fl_u.nfs_fl.owner) {
		/* lockowner allocation has failed */
		nlmclnt_release_call(call);
		return -ENOMEM;
	}
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;
	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst *call;

	for(;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			atomic_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = nlm_get_host(host);
			return call;
		}
		if (signalled())
			break;
		printk("nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

void nlmclnt_release_call(struct nlm_rqst *call)
{
	if (!atomic_dec_and_test(&call->a_count))
		return;
	nlmclnt_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	nlmclnt_release_call(data);
}

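/*
 * Back off for up to NLMCLNT_GRACE_WAIT while the server is in its
 * grace period.  Returns 0 if the caller may retry the request, or
 * -EINTR if a signal was received while waiting.
 */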
static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled ()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled ())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host *host = req->a_host;
	struct rpc_clnt *clnt;
	struct nlm_args *argp = &req->a_args;
	struct nlm_res *resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp = argp,
		.rpc_resp = resp,
		.rpc_cred = cred,
	};
	int status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
			default:
				break;
			}
			break;
		} else
		if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
					"lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n",
				ntohl(resp->status));
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host *host = req->a_host;
	struct rpc_clnt *clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

	/* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

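/*
 * Kick off an async call and return without waiting for the result.
 * The caller-supplied rpc_release callback is responsible for releasing
 * the request once the task completes; nlmclnt_async_call() below is
 * the variant that waits for completion.
 */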
static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp = &req->a_args,
		.rpc_resp = &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp = &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 *      guaranteed to complete, we still always attempt to wait for
 *      completion in order to be able to correctly track the lock
 *      state.
 */
static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp = &req->a_args,
		.rpc_resp = &req->a_res,
		.rpc_cred = cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int status;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
		case nlm_granted:
			fl->fl_type = F_UNLCK;
			break;
		case nlm_lck_denied:
			/*
			 * Report the conflicting lock back to the application.
			 */
			fl->fl_start = req->a_res.lock.fl.fl_start;
			fl->fl_end = req->a_res.lock.fl.fl_end;
			fl->fl_type = req->a_res.lock.fl.fl_type;
			fl->fl_pid = 0;
			break;
		default:
			status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	list_del(&fl->fl_u.nfs_fl.list);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	BUG_ON(fl->fl_ops != NULL);
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

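/*
 * Apply the lock request to the local VFS.  The request is dispatched
 * to the POSIX or flock path depending on fl_flags; anything else is
 * a bug.
 */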
static int do_vfs_lock(struct file_lock *fl)
{
	int res = 0;
	switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
		case FL_POSIX:
			res = posix_lock_file_wait(fl->fl_file, fl);
			break;
		case FL_FLOCK:
			res = flock_lock_file_wait(fl->fl_file, fl);
			break;
		default:
			BUG();
	}
	return res;
}

/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A: Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct rpc_cred *cred = nfs_file_cred(fl->fl_file);
	struct nlm_host *host = req->a_host;
	struct nlm_res *resp = &req->a_res;
	struct nlm_wait *block = NULL;
	unsigned char fl_flags = fl->fl_flags;
	unsigned char fl_type;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;
	req->a_args.state = nsm_local_state;

	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->fl_flags = fl_flags;
	if (status < 0)
		goto out;

	block = nlmclnt_prepare_block(host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;
	for(;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (resp->status != nlm_lck_blocked)
			break;
	}

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out_unblock;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->fl_flags = fl_flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error.  So
	 * turn it into an ENOLCK.
	 */
	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(block);
out:
	nlmclnt_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	dprintk("lockd: lock attempt ended in fatal error.\n"
		"       Attempting to unlock.\n");
	nlmclnt_finish_block(block);
	fl_type = fl->fl_type;
	fl->fl_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_type = fl_type;
	fl->fl_flags = fl_flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst reqst, *req;
	int status;

	req = &reqst;
	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host = host;
	req->a_flags = 0;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a. Ignore the problem
	 *  b. Send the owning process some signal (Linux doesn't have
	 *     SIGLOST, though...)
	 *  c. Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host *host = req->a_host;
	struct nlm_res *resp = &req->a_res;
	int status;
	unsigned char fl_flags = fl->fl_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_flags = fl_flags;
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	atomic_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk("lockd: unexpected unlock status: %d\n",
			ntohl(resp->status));
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	nlmclnt_release_call(req);
	return status;
}

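/*
 * Completion handler for the async UNLOCK call: back off and retry
 * while the server is still in its grace period, rebind and retry on
 * transport errors, and give up on fatal errors.
 */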
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst *req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		switch (task->tk_status) {
		case -EACCES:
		case -EIO:
			goto die;
		default:
			goto retry_rebind;
		}
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
retry_rebind:
	nlm_rebind_host(req->a_host);
retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst *req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");

	req = nlm_alloc_call(host);
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	atomic_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlmclnt_release_call(req);
	return status;
}

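/*
 * Completion handler for the async CANCEL call.  Transport errors and
 * NLM_LCK_DENIED_NOLOCKS are retried (up to NLMCLNT_MAX_RETRIES) after
 * rebinding to the server; the other expected NLM results are treated
 * as success.
 */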
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst *req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_ASSASSINATED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
			task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %u (task %u)\n",
			status, task->tk_pid);

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch(ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		ntohl(status));
	return -ENOLCK;
}