// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/lockd/clntproc.c
 *
 * RPC procedures for the client side NLM implementation
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/nfs_fs.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>

#define NLMDBG_FACILITY		NLMDBG_CLIENT
#define NLMCLNT_GRACE_WAIT	(5*HZ)
#define NLMCLNT_POLL_TIMEOUT	(30*HZ)
#define NLMCLNT_MAX_RETRIES	3

static int	nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int	nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static int	nlm_stat_to_errno(__be32 stat);
static void	nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static int	nlmclnt_cancel(struct nlm_host *, int , struct file_lock *);

static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;

/*
 * Cookie counter for NLM requests
 */
static atomic_t	nlm_cookie = ATOMIC_INIT(0x1234);

void nlmclnt_next_cookie(struct nlm_cookie *c)
{
	u32	cookie = atomic_inc_return(&nlm_cookie);

	memcpy(c->data, &cookie, 4);
	c->len = 4;
}

static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner)
{
	refcount_inc(&lockowner->count);
	return lockowner;
}

static void nlm_put_lockowner(struct nlm_lockowner *lockowner)
{
	if (!refcount_dec_and_lock(&lockowner->count, &lockowner->host->h_lock))
		return;
	list_del(&lockowner->list);
	spin_unlock(&lockowner->host->h_lock);
	nlmclnt_release_host(lockowner->host);
	kfree(lockowner);
}

static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->pid == pid)
			return -EBUSY;
	}
	return 0;
}

static inline uint32_t __nlm_alloc_pid(struct nlm_host *host)
{
	uint32_t res;

	do {
		res = host->h_pidcount++;
	} while (nlm_pidbusy(host, res) < 0);
	return res;
}

static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *lockowner;

	list_for_each_entry(lockowner, &host->h_lockowners, list) {
		if (lockowner->owner != owner)
			continue;
		return nlm_get_lockowner(lockowner);
	}
	return NULL;
}

static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner)
{
	struct nlm_lockowner *res, *new = NULL;

	spin_lock(&host->h_lock);
	res = __nlm_find_lockowner(host, owner);
	if (res == NULL) {
		spin_unlock(&host->h_lock);
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&host->h_lock);
		res = __nlm_find_lockowner(host, owner);
		if (res == NULL && new != NULL) {
			res = new;
			refcount_set(&new->count, 1);
			new->owner = owner;
			new->pid = __nlm_alloc_pid(host);
			new->host = nlm_get_host(host);
			list_add(&new->list, &host->h_lockowners);
			new = NULL;
		}
	}
	spin_unlock(&host->h_lock);
	kfree(new);
	return res;
}

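/*
 * Each fl_owner_t is mapped to a per-host nlm_lockowner carrying a small
 * unique pid.  nlm_find_lockowner() drops h_lock around the GFP_KERNEL
 * allocation and then repeats the search, so a racing lookup for the same
 * owner cannot insert a duplicate entry; the unused allocation is freed.
 */
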
/*
 * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls
 */
static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_args	*argp = &req->a_args;
	struct nlm_lock	*lock = &argp->lock;
	char *nodename = req->a_host->h_rpcclnt->cl_nodename;

	nlmclnt_next_cookie(&argp->cookie);
	memcpy(&lock->fh, NFS_FH(locks_inode(fl->fl_file)), sizeof(struct nfs_fh));
	lock->caller = nodename;
	lock->oh.data = req->a_owner;
	lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
				(unsigned int)fl->fl_u.nfs_fl.owner->pid,
				nodename);
	lock->svid = fl->fl_u.nfs_fl.owner->pid;
	lock->fl.fl_start = fl->fl_start;
	lock->fl.fl_end = fl->fl_end;
	lock->fl.fl_type = fl->fl_type;
}

static void nlmclnt_release_lockargs(struct nlm_rqst *req)
{
	WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL);
}

/**
 * nlmclnt_proc - Perform a single client-side lock request
 * @host: address of a valid nlm_host context representing the NLM server
 * @cmd: fcntl-style file lock operation to perform
 * @fl: address of arguments for the lock operation
 * @data: address of data to be sent to callback operations
 *
 */
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data)
{
	struct nlm_rqst	*call;
	int status;
	const struct nlmclnt_operations *nlmclnt_ops = host->h_nlmclnt_ops;

	call = nlm_alloc_call(host);
	if (call == NULL)
		return -ENOMEM;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_alloc_call)
		nlmclnt_ops->nlmclnt_alloc_call(data);

	nlmclnt_locks_init_private(fl, host);
	if (!fl->fl_u.nfs_fl.owner) {
		/* lockowner allocation has failed */
		nlmclnt_release_call(call);
		return -ENOMEM;
	}
	/* Set up the argument struct */
	nlmclnt_setlockargs(call, fl);
	call->a_callback_data = data;

	if (IS_SETLK(cmd) || IS_SETLKW(cmd)) {
		if (fl->fl_type != F_UNLCK) {
			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
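			/* a non-zero 'block' tells the server it may answer
			 * with LCK_BLOCKED and grant the lock later through
			 * an NLM_GRANTED callback */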
			status = nlmclnt_lock(call, fl);
		} else
			status = nlmclnt_unlock(call, fl);
	} else if (IS_GETLK(cmd))
		status = nlmclnt_test(call, fl);
	else
		status = -EINVAL;
	fl->fl_ops->fl_release_private(fl);
	fl->fl_ops = NULL;

	dprintk("lockd: clnt proc returns %d\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(nlmclnt_proc);

/*
 * Allocate an NLM RPC call struct
 */
struct nlm_rqst *nlm_alloc_call(struct nlm_host *host)
{
	struct nlm_rqst	*call;

	for(;;) {
		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (call != NULL) {
			refcount_set(&call->a_count, 1);
			locks_init_lock(&call->a_args.lock.fl);
			locks_init_lock(&call->a_res.lock.fl);
			call->a_host = nlm_get_host(host);
			return call;
		}
		if (signalled())
			break;
		printk("nlm_alloc_call: failed, waiting for memory\n");
		schedule_timeout_interruptible(5*HZ);
	}
	return NULL;
}

void nlmclnt_release_call(struct nlm_rqst *call)
{
	const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;

	if (!refcount_dec_and_test(&call->a_count))
		return;
	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_release_call)
		nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
	nlmclnt_release_host(call->a_host);
	nlmclnt_release_lockargs(call);
	kfree(call);
}

static void nlmclnt_rpc_release(void *data)
{
	nlmclnt_release_call(data);
}

static int nlm_wait_on_grace(wait_queue_head_t *queue)
{
	DEFINE_WAIT(wait);
	int status = -EINTR;

	prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE);
	if (!signalled ()) {
		schedule_timeout(NLMCLNT_GRACE_WAIT);
		try_to_freeze();
		if (!signalled ())
			status = 0;
	}
	finish_wait(queue, &wait);
	return status;
}

/*
 * Generic NLM call
 */
static int
nlmclnt_call(const struct cred *cred, struct nlm_rqst *req, u32 proc)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct nlm_args	*argp = &req->a_args;
	struct nlm_res	*resp = &req->a_res;
	struct rpc_message msg = {
		.rpc_argp	= argp,
		.rpc_resp	= resp,
		.rpc_cred	= cred,
	};
	int status;

	dprintk("lockd: call procedure %d on %s\n",
			(int)proc, host->h_name);

	do {
		if (host->h_reclaiming && !argp->reclaim)
			goto in_grace_period;

		/* If we have no RPC client yet, create one. */
		if ((clnt = nlm_bind_host(host)) == NULL)
			return -ENOLCK;
		msg.rpc_proc = &clnt->cl_procinfo[proc];

		/* Perform the RPC call. If an error occurs, try again */
		if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) {
			dprintk("lockd: rpc_call returned error %d\n", -status);
			switch (status) {
			case -EPROTONOSUPPORT:
				status = -EINVAL;
				break;
			case -ECONNREFUSED:
			case -ETIMEDOUT:
			case -ENOTCONN:
				nlm_rebind_host(host);
				status = -EAGAIN;
				break;
			case -ERESTARTSYS:
				return signalled () ? -EINTR : status;
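			/* any other RPC error is passed back to the caller */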
			default:
				break;
			}
			break;
		} else
		if (resp->status == nlm_lck_denied_grace_period) {
			dprintk("lockd: server in grace period\n");
			if (argp->reclaim) {
				printk(KERN_WARNING
					"lockd: spurious grace period reject?!\n");
				return -ENOLCK;
			}
		} else {
			if (!argp->reclaim) {
				/* We appear to be out of the grace period */
				wake_up_all(&host->h_gracewait);
			}
			dprintk("lockd: server returns status %d\n",
				ntohl(resp->status));
			return 0;	/* Okay, call complete */
		}

in_grace_period:
		/*
		 * The server has rebooted and appears to be in the grace
		 * period during which locks are only allowed to be
		 * reclaimed.
		 * We can only back off and try again later.
		 */
		status = nlm_wait_on_grace(&host->h_gracewait);
	} while (status == 0);

	return status;
}

/*
 * Generic NLM call, async version.
 */
static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct nlm_host	*host = req->a_host;
	struct rpc_clnt	*clnt;
	struct rpc_task_setup task_setup_data = {
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = req,
		.flags = RPC_TASK_ASYNC,
	};

	dprintk("lockd: call procedure %d on %s (async)\n",
			(int)proc, host->h_name);

	/* If we have no RPC client yet, create one. */
	clnt = nlm_bind_host(host);
	if (clnt == NULL)
		goto out_err;
	msg->rpc_proc = &clnt->cl_procinfo[proc];
	task_setup_data.rpc_client = clnt;

	/* bootstrap and kick off the async RPC call */
	return rpc_run_task(&task_setup_data);
out_err:
	tk_ops->rpc_release(req);
	return ERR_PTR(-ENOLCK);
}

static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;

	task = __nlm_async_call(req, proc, msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/*
 * NLM asynchronous call.
 */
int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_res,
	};
	return nlm_do_async_call(req, proc, &msg, tk_ops);
}

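/*
 * nlm_async_call() transmits both the argument and result structures and is
 * used for the asynchronous *_MSG procedures; nlm_async_reply() encodes an
 * nlm_res as the call argument, as the *_RES procedures expect.  Neither
 * waits for the RPC task to complete.
 */
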
/*
 * NLM client asynchronous call.
 *
 * Note that although the calls are asynchronous, and are therefore
 * guaranteed to complete, we still always attempt to wait for
 * completion in order to be able to correctly track the lock
 * state.
 */
static int nlmclnt_async_call(const struct cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
	struct rpc_message msg = {
		.rpc_argp	= &req->a_args,
		.rpc_resp	= &req->a_res,
		.rpc_cred	= cred,
	};
	struct rpc_task *task;
	int err;

	task = __nlm_async_call(req, proc, &msg, tk_ops);
	if (IS_ERR(task))
		return PTR_ERR(task);
	err = rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return err;
}

/*
 * TEST for the presence of a conflicting lock
 */
static int
nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl)
{
	int status;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST);
	if (status < 0)
		goto out;

	switch (req->a_res.status) {
	case nlm_granted:
		fl->fl_type = F_UNLCK;
		break;
	case nlm_lck_denied:
		/*
		 * Report the conflicting lock back to the application.
		 */
		fl->fl_start = req->a_res.lock.fl.fl_start;
		fl->fl_end = req->a_res.lock.fl.fl_end;
		fl->fl_type = req->a_res.lock.fl.fl_type;
		fl->fl_pid = -req->a_res.lock.fl.fl_pid;
		break;
	default:
		status = nlm_stat_to_errno(req->a_res.status);
	}
out:
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state;
	new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner);
	list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
}

static void nlmclnt_locks_release_private(struct file_lock *fl)
{
	spin_lock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	list_del(&fl->fl_u.nfs_fl.list);
	spin_unlock(&fl->fl_u.nfs_fl.owner->host->h_lock);
	nlm_put_lockowner(fl->fl_u.nfs_fl.owner);
}

static const struct file_lock_operations nlmclnt_lock_ops = {
	.fl_copy_lock = nlmclnt_locks_copy_lock,
	.fl_release_private = nlmclnt_locks_release_private,
};

static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host)
{
	fl->fl_u.nfs_fl.state = 0;
	fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner);
	INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list);
	fl->fl_ops = &nlmclnt_lock_ops;
}

static int do_vfs_lock(struct file_lock *fl)
{
	return locks_lock_file_wait(fl->fl_file, fl);
}

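/*
 * The file_lock_operations above tie every lock copied into the VFS to its
 * nlm_lockowner and keep it on the host's h_granted list; that list is what
 * the lock recovery code walks when the server reboots.
 */
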
/*
 * LOCK: Try to create a lock
 *
 *			Programmer Harassment Alert
 *
 * When given a blocking lock request in a sync RPC call, the HPUX lockd
 * will faithfully return LCK_BLOCKED but never cares to notify us when
 * the lock could be granted. This way, our local process could hang
 * around forever waiting for the callback.
 *
 *  Solution A:	Implement busy-waiting
 *  Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES})
 *
 * For now I am implementing solution A, because I hate the idea of
 * re-implementing lockd for a third time in two months. The async
 * calls shouldn't be too hard to do, however.
 *
 * This is one of the lovely things about standards in the NFS area:
 * they're so soft and squishy you can't really blame HP for doing this.
 */
static int
nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl)
{
	const struct cred *cred = nfs_file_cred(fl->fl_file);
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	struct nlm_wait *block = NULL;
	unsigned char fl_flags = fl->fl_flags;
	unsigned char fl_type;
	int status = -ENOLCK;

	if (nsm_monitor(host) < 0)
		goto out;
	req->a_args.state = nsm_local_state;

	fl->fl_flags |= FL_ACCESS;
	status = do_vfs_lock(fl);
	fl->fl_flags = fl_flags;
	if (status < 0)
		goto out;

	block = nlmclnt_prepare_block(host, fl);
again:
	/*
	 * Initialise resp->status to a valid non-zero value,
	 * since 0 == nlm_lck_granted
	 */
	resp->status = nlm_lck_blocked;
	for(;;) {
		/* Reboot protection */
		fl->fl_u.nfs_fl.state = host->h_state;
		status = nlmclnt_call(cred, req, NLMPROC_LOCK);
		if (status < 0)
			break;
		/* Did a reclaimer thread notify us of a server reboot? */
		if (resp->status == nlm_lck_denied_grace_period)
			continue;
		if (resp->status != nlm_lck_blocked)
			break;
		/* Wait on an NLM blocking lock */
		status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT);
		if (status < 0)
			break;
		if (resp->status != nlm_lck_blocked)
			break;
	}

	/* if we were interrupted while blocking, then cancel the lock request
	 * and exit
	 */
	if (resp->status == nlm_lck_blocked) {
		if (!req->a_args.block)
			goto out_unlock;
		if (nlmclnt_cancel(host, req->a_args.block, fl) == 0)
			goto out_unblock;
	}

	if (resp->status == nlm_granted) {
		down_read(&host->h_rwsem);
		/* Check whether or not the server has rebooted */
		if (fl->fl_u.nfs_fl.state != host->h_state) {
			up_read(&host->h_rwsem);
			goto again;
		}
		/* Ensure the resulting lock will get added to granted list */
		fl->fl_flags |= FL_SLEEP;
		if (do_vfs_lock(fl) < 0)
			printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
		up_read(&host->h_rwsem);
		fl->fl_flags = fl_flags;
		status = 0;
	}
	if (status < 0)
		goto out_unlock;
	/*
	 * EAGAIN doesn't make sense for sleeping locks, and in some
	 * cases NLM_LCK_DENIED is returned for a permanent error. So
	 * turn it into an ENOLCK.
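	 * (A blocking caller would otherwise be told to retry a request
	 * the server has just refused.)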
	 */
	if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP))
		status = -ENOLCK;
	else
		status = nlm_stat_to_errno(resp->status);
out_unblock:
	nlmclnt_finish_block(block);
out:
	nlmclnt_release_call(req);
	return status;
out_unlock:
	/* Fatal error: ensure that we remove the lock altogether */
	dprintk("lockd: lock attempt ended in fatal error.\n"
		"       Attempting to unlock.\n");
	nlmclnt_finish_block(block);
	fl_type = fl->fl_type;
	fl->fl_type = F_UNLCK;
	down_read(&host->h_rwsem);
	do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_type = fl_type;
	fl->fl_flags = fl_flags;
	nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	return status;
}

/*
 * RECLAIM: Try to reclaim a lock
 */
int
nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl,
		struct nlm_rqst *req)
{
	int status;

	memset(req, 0, sizeof(*req));
	locks_init_lock(&req->a_args.lock.fl);
	locks_init_lock(&req->a_res.lock.fl);
	req->a_host = host;

	/* Set up the argument struct */
	nlmclnt_setlockargs(req, fl);
	req->a_args.reclaim = 1;

	status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK);
	if (status >= 0 && req->a_res.status == nlm_granted)
		return 0;

	printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d "
				"(errno %d, status %d)\n", fl->fl_pid,
				status, ntohl(req->a_res.status));

	/*
	 * FIXME: This is a serious failure. We can
	 *
	 *  a.	Ignore the problem
	 *  b.	Send the owning process some signal (Linux doesn't have
	 *	SIGLOST, though...)
	 *  c.	Retry the operation
	 *
	 * Until someone comes up with a simple implementation
	 * for b or c, I'll choose option a.
	 */

	return -ENOLCK;
}

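/*
 * nlmclnt_reclaim() is called from the per-host lock recovery thread after
 * a server reboot; with a_args.reclaim set, the LOCK request is accepted by
 * the server even while it is still in its grace period.
 */
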
/*
 * UNLOCK: remove an existing lock
 */
static int
nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
{
	struct nlm_host	*host = req->a_host;
	struct nlm_res	*resp = &req->a_res;
	int status;
	unsigned char fl_flags = fl->fl_flags;

	/*
	 * Note: the server is supposed to either grant us the unlock
	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
	 * case, we want to unlock.
	 */
	fl->fl_flags |= FL_EXISTS;
	down_read(&host->h_rwsem);
	status = do_vfs_lock(fl);
	up_read(&host->h_rwsem);
	fl->fl_flags = fl_flags;
	if (status == -ENOENT) {
		status = 0;
		goto out;
	}

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_UNLOCK, &nlmclnt_unlock_ops);
	if (status < 0)
		goto out;

	if (resp->status == nlm_granted)
		goto out;

	if (resp->status != nlm_lck_denied_nolocks)
		printk("lockd: unexpected unlock status: %d\n",
			ntohl(resp->status));
	/* What to do now? I'm out of my depth... */
	status = -ENOLCK;
out:
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_unlock_prepare(struct rpc_task *task, void *data)
{
	struct nlm_rqst *req = data;
	const struct nlmclnt_operations *nlmclnt_ops = req->a_host->h_nlmclnt_ops;
	bool defer_call = false;

	if (nlmclnt_ops && nlmclnt_ops->nlmclnt_unlock_prepare)
		defer_call = nlmclnt_ops->nlmclnt_unlock_prepare(task, req->a_callback_data);

	if (!defer_call)
		rpc_call_start(task);
}

static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status);
		switch (task->tk_status) {
		case -EACCES:
		case -EIO:
			goto die;
		default:
			goto retry_rebind;
		}
	}
	if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
		rpc_delay(task, NLMCLNT_GRACE_WAIT);
		goto retry_unlock;
	}
	if (status != NLM_LCK_GRANTED)
		printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status);
die:
	return;
retry_rebind:
	nlm_rebind_host(req->a_host);
retry_unlock:
	rpc_restart_call(task);
}

static const struct rpc_call_ops nlmclnt_unlock_ops = {
	.rpc_call_prepare = nlmclnt_unlock_prepare,
	.rpc_call_done = nlmclnt_unlock_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Cancel a blocked lock request.
 * We always use an async RPC call for this in order not to hang a
 * process that has been Ctrl-C'ed.
 */
static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl)
{
	struct nlm_rqst	*req;
	int status;

	dprintk("lockd: blocking lock attempt was interrupted by a signal.\n"
		"       Attempting to cancel lock.\n");

	req = nlm_alloc_call(host);
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	refcount_inc(&req->a_count);
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
			NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;
	nlmclnt_release_call(req);
	return status;
}

static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
	struct nlm_rqst	*req = data;
	u32 status = ntohl(req->a_res.status);

	if (RPC_SIGNALLED(task))
		goto die;

	if (task->tk_status < 0) {
		dprintk("lockd: CANCEL call error %d, retrying.\n",
			task->tk_status);
		goto retry_cancel;
	}

	dprintk("lockd: cancel status %u (task %u)\n",
		status, task->tk_pid);

	switch (status) {
	case NLM_LCK_GRANTED:
	case NLM_LCK_DENIED_GRACE_PERIOD:
	case NLM_LCK_DENIED:
		/* Everything's good */
		break;
	case NLM_LCK_DENIED_NOLOCKS:
		dprintk("lockd: CANCEL failed (server has no locks)\n");
		goto retry_cancel;
	default:
		printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n",
			status);
	}

die:
	return;

retry_cancel:
	/* Don't ever retry more than 3 times */
	if (req->a_retries++ >= NLMCLNT_MAX_RETRIES)
		goto die;
	nlm_rebind_host(req->a_host);
	rpc_restart_call(task);
	rpc_delay(task, 30 * HZ);
}

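/*
 * Like nlmclnt_unlock_ops above, the table below releases the nlm_rqst
 * through nlmclnt_rpc_release().  This drops the extra reference taken in
 * nlmclnt_unlock()/nlmclnt_cancel() before the async call was started, so
 * the request stays valid until the RPC task has finished with it.
 */
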
static const struct rpc_call_ops nlmclnt_cancel_ops = {
	.rpc_call_done = nlmclnt_cancel_callback,
	.rpc_release = nlmclnt_rpc_release,
};

/*
 * Convert an NLM status code to a generic kernel errno
 */
static int
nlm_stat_to_errno(__be32 status)
{
	switch(ntohl(status)) {
	case NLM_LCK_GRANTED:
		return 0;
	case NLM_LCK_DENIED:
		return -EAGAIN;
	case NLM_LCK_DENIED_NOLOCKS:
	case NLM_LCK_DENIED_GRACE_PERIOD:
		return -ENOLCK;
	case NLM_LCK_BLOCKED:
		printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n");
		return -ENOLCK;
#ifdef CONFIG_LOCKD_V4
	case NLM_DEADLCK:
		return -EDEADLK;
	case NLM_ROFS:
		return -EROFS;
	case NLM_STALE_FH:
		return -ESTALE;
	case NLM_FBIG:
		return -EOVERFLOW;
	case NLM_FAILED:
		return -ENOLCK;
#endif
	}
	printk(KERN_NOTICE "lockd: unexpected server status %d\n",
		 ntohl(status));
	return -ENOLCK;
}