/* AFS file locking support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include "internal.h"

#define AFS_LOCK_GRANTED	0
#define AFS_LOCK_PENDING	1

static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
static void afs_fl_release_private(struct file_lock *fl);

static struct workqueue_struct *afs_lock_manager;
static DEFINE_MUTEX(afs_lock_manager_mutex);

static const struct file_lock_operations afs_lock_ops = {
	.fl_copy_lock		= afs_fl_copy_lock,
	.fl_release_private	= afs_fl_release_private,
};

/*
 * initialise the lock manager thread if it isn't already running
 */
static int afs_init_lock_manager(void)
{
	int ret;

	ret = 0;
	if (!afs_lock_manager) {
		mutex_lock(&afs_lock_manager_mutex);
		if (!afs_lock_manager) {
			afs_lock_manager =
				create_singlethread_workqueue("kafs_lockd");
			if (!afs_lock_manager)
				ret = -ENOMEM;
		}
		mutex_unlock(&afs_lock_manager_mutex);
	}
	return ret;
}

/*
 * destroy the lock manager thread if it's running
 */
void __exit afs_kill_lock_manager(void)
{
	if (afs_lock_manager)
		destroy_workqueue(afs_lock_manager);
}

/*
 * if the callback is broken on this vnode, then the lock may now be available
 */
void afs_lock_may_be_available(struct afs_vnode *vnode)
{
	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
}

/*
 * the lock will time out in 5 minutes unless we extend it, so schedule
 * extension in a bit less than that time
 */
static void afs_schedule_lock_extension(struct afs_vnode *vnode)
{
	queue_delayed_work(afs_lock_manager, &vnode->lock_work,
			   AFS_LOCKWAIT * HZ / 2);
}

/*
 * grant one or more locks (readlocks are allowed to jump the queue if the
 * first lock in the queue is itself a readlock)
 * - the caller must hold the vnode lock
 */
static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
{
	struct file_lock *p, *_p;

	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
	if (fl->fl_type == F_RDLCK) {
		list_for_each_entry_safe(p, _p, &vnode->pending_locks,
					 fl_u.afs.link) {
			if (p->fl_type == F_RDLCK) {
				p->fl_u.afs.state = AFS_LOCK_GRANTED;
				list_move_tail(&p->fl_u.afs.link,
					       &vnode->granted_locks);
				wake_up(&p->fl_wait);
			}
		}
	}
}

/*
 * do work for a lock, including:
 * - probing for a lock we're waiting on but didn't get immediately
 * - extending a lock that's close to timing out
 */
void afs_lock_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, lock_work.work);
	struct file_lock *fl;
	afs_lock_type_t type;
	struct key *key;
	int ret;

	_enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);

	spin_lock(&vnode->lock);

	if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
		_debug("unlock");
		spin_unlock(&vnode->lock);

		/* attempt to release the server lock; if it fails, we just
		 * wait 5 minutes and it'll time out anyway */
		ret = afs_vnode_release_lock(vnode, vnode->unlock_key);
		if (ret < 0)
			printk(KERN_WARNING "AFS:"
			       " Failed to release lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);

		spin_lock(&vnode->lock);
		key_put(vnode->unlock_key);
		vnode->unlock_key = NULL;
		clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
	}

	/* if we've got a lock, then it must be time to extend that lock as AFS
	 * locks time out after 5 minutes */
	if (!list_empty(&vnode->granted_locks)) {
		_debug("extend");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->granted_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_extend_lock(vnode, key);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		key_put(key);
		switch (ret) {
		case 0:
			afs_schedule_lock_extension(vnode);
			break;
		default:
			/* ummm... we failed to extend the lock - retry
			 * extension shortly */
			printk(KERN_WARNING "AFS:"
			       " Failed to extend lock on {%x:%x} error %d\n",
			       vnode->fid.vid, vnode->fid.vnode, ret);
			queue_delayed_work(afs_lock_manager, &vnode->lock_work,
					   HZ * 10);
			break;
		}
		_leave(" [extend]");
		return;
	}

	/* if we don't have a granted lock, then we must've been called back by
	 * the server, and so it might be possible to get a lock we're
	 * currently waiting for */
	if (!list_empty(&vnode->pending_locks)) {
		_debug("get");

		if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
			BUG();
		fl = list_entry(vnode->pending_locks.next,
				struct file_lock, fl_u.afs.link);
		key = key_get(fl->fl_file->private_data);
		type = (fl->fl_type == F_RDLCK) ?
			AFS_LOCK_READ : AFS_LOCK_WRITE;
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case -EWOULDBLOCK:
			_debug("blocked");
			break;
		case 0:
			_debug("acquired");
			if (type == AFS_LOCK_READ)
				set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
			else
				set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
			ret = AFS_LOCK_GRANTED;
			/* fall through to record the result and wake the
			 * waiter */
		default:
			spin_lock(&vnode->lock);
			/* the pending lock may have been withdrawn due to a
			 * signal */
			if (list_entry(vnode->pending_locks.next,
				       struct file_lock, fl_u.afs.link) == fl) {
				fl->fl_u.afs.state = ret;
				if (ret == AFS_LOCK_GRANTED)
					afs_grant_locks(vnode, fl);
				else
					list_del_init(&fl->fl_u.afs.link);
				wake_up(&fl->fl_wait);
				spin_unlock(&vnode->lock);
			} else {
				_debug("withdrawn");
				clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
				clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
				spin_unlock(&vnode->lock);
				afs_vnode_release_lock(vnode, key);
				if (!list_empty(&vnode->pending_locks))
					afs_lock_may_be_available(vnode);
			}
			break;
		}
		key_put(key);
		_leave(" [pend]");
		return;
	}

	/* looks like the lock request was withdrawn on a signal */
	spin_unlock(&vnode->lock);
	_leave(" [no locks]");
}

/*
 * pass responsibility for the unlocking of a vnode on the server to the
 * manager thread, lest a pending signal in the calling thread interrupt
 * AF_RXRPC
 * - the caller must hold the vnode lock
 */
static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
{
	cancel_delayed_work(&vnode->lock_work);
	if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
	    !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
		BUG();
	if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
		BUG();
	vnode->unlock_key = key_get(key);
	afs_lock_may_be_available(vnode);
}

/*
 * request a lock on a file on the server
 */
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	afs_lock_type_t type;
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file locks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	ret = afs_init_lock_manager();
	if (ret < 0)
		return ret;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;

	lock_flocks();

	/* make sure we've got a callback on this file and that our view of the
	 * data version is up to date */
	ret = afs_vnode_fetch_status(vnode, NULL, key);
	if (ret < 0)
		goto error;

	if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
		ret = -EAGAIN;
		goto error;
	}

	spin_lock(&vnode->lock);

	/* if we've already got a readlock on the server then we can instantly
	 * grant another readlock, irrespective of whether there are any
	 * pending writelocks */
	if (type == AFS_LOCK_READ &&
	    vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
		_debug("instant readlock");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		ASSERT(!list_empty(&vnode->granted_locks));
		goto sharing_existing_lock;
	}

	/* if there's no-one else with a lock on this vnode, then we need to
	 * ask the server for a lock */
	if (list_empty(&vnode->pending_locks) &&
	    list_empty(&vnode->granted_locks)) {
		_debug("not locked");
		ASSERTCMP(vnode->flags &
			  ((1 << AFS_VNODE_LOCKING) |
			   (1 << AFS_VNODE_READLOCKED) |
			   (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
		list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
		set_bit(AFS_VNODE_LOCKING, &vnode->flags);
		spin_unlock(&vnode->lock);

		ret = afs_vnode_set_lock(vnode, key, type);
		clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
		switch (ret) {
		case 0:
			_debug("acquired");
			goto acquired_server_lock;
		case -EWOULDBLOCK:
			_debug("would block");
			spin_lock(&vnode->lock);
			ASSERT(list_empty(&vnode->granted_locks));
			ASSERTCMP(vnode->pending_locks.next, ==,
				  &fl->fl_u.afs.link);
			goto wait;
		default:
			spin_lock(&vnode->lock);
			list_del_init(&fl->fl_u.afs.link);
			spin_unlock(&vnode->lock);
			goto error;
		}
	}

	/* otherwise, we need to wait for a local lock to become available */
	_debug("wait local");
	list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
wait:
	if (!(fl->fl_flags & FL_SLEEP)) {
		_debug("noblock");
		ret = -EAGAIN;
		goto abort_attempt;
	}
	spin_unlock(&vnode->lock);

	/* now we need to sleep and wait for the lock manager thread to get the
	 * lock from the server */
	_debug("sleep");
	ret = wait_event_interruptible(fl->fl_wait,
				       fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0)
			goto error;
		spin_lock(&vnode->lock);
		goto given_lock;
	}

	/* we were interrupted, but someone may still be in the throes of
	 * giving us the lock */
	_debug("intr");
	ASSERTCMP(ret, ==, -ERESTARTSYS);

	spin_lock(&vnode->lock);
	if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
		ret = fl->fl_u.afs.state;
		if (ret < 0) {
			spin_unlock(&vnode->lock);
			goto error;
		}
		goto given_lock;
	}

abort_attempt:
	/* we aren't going to get the lock, either because we're unwilling to
	 * wait, or because some signal happened */
	_debug("abort");
	if (list_empty(&vnode->granted_locks) &&
	    vnode->pending_locks.next == &fl->fl_u.afs.link) {
		if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
			/* kick the next pending lock into having a go */
			list_del_init(&fl->fl_u.afs.link);
			afs_lock_may_be_available(vnode);
		}
	} else {
		list_del_init(&fl->fl_u.afs.link);
	}
	spin_unlock(&vnode->lock);
	goto error;

acquired_server_lock:
	/* we've acquired a server lock, but it needs to be renewed after 5
	 * mins */
	spin_lock(&vnode->lock);
	afs_schedule_lock_extension(vnode);
	if (type == AFS_LOCK_READ)
		set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
	else
		set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
sharing_existing_lock:
	/* the lock has been granted as far as we're concerned... */
	fl->fl_u.afs.state = AFS_LOCK_GRANTED;
	list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
given_lock:
	/* ... but we do still need to get the VFS's blessing */
	ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
	ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
				(1 << AFS_VNODE_WRITELOCKED))) != 0);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0)
		goto vfs_rejected_lock;
	spin_unlock(&vnode->lock);

	/* again, make sure we've got a callback on this file and, again, make
	 * sure that our view of the data version is up to date (we ignore
	 * errors incurred here and deal with the consequences elsewhere) */
	afs_vnode_fetch_status(vnode, NULL, key);

error:
	unlock_flocks();
	_leave(" = %d", ret);
	return ret;

vfs_rejected_lock:
	/* the VFS rejected the lock we just obtained, so we have to discard
	 * what we just got */
	_debug("vfs refused %d", ret);
	list_del_init(&fl->fl_u.afs.link);
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	goto abort_attempt;
}

/*
 * unlock a file on the server
 */
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret;

	_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);

	/* only whole-file unlocks are supported */
	if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
		return -EINVAL;

	fl->fl_ops = &afs_lock_ops;
	INIT_LIST_HEAD(&fl->fl_u.afs.link);
	fl->fl_u.afs.state = AFS_LOCK_PENDING;

	spin_lock(&vnode->lock);
	ret = posix_lock_file(file, fl, NULL);
	if (ret < 0) {
		spin_unlock(&vnode->lock);
		_leave(" = %d [vfs]", ret);
		return ret;
	}

	/* discard the server lock only if all granted locks are gone */
	if (list_empty(&vnode->granted_locks))
		afs_defer_unlock(vnode, key);
	spin_unlock(&vnode->lock);
	_leave(" = 0");
	return 0;
}

/*
 * return information about a lock we currently hold, if indeed we hold one
 */
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
	struct key *key = file->private_data;
	int ret, lock_count;

	_enter("");

	fl->fl_type = F_UNLCK;

	mutex_lock(&vnode->vfs_inode.i_mutex);

	/* check local lock records first */
	ret = 0;
	posix_test_lock(file, fl);
	if (fl->fl_type == F_UNLCK) {
		/* no local locks; consult the server */
		ret = afs_vnode_fetch_status(vnode, NULL, key);
		if (ret < 0)
			goto error;
		lock_count = vnode->status.lock_count;
		if (lock_count) {
			/* the server reports a positive count for read locks
			 * and a negative value for a write lock */
			if (lock_count > 0)
				fl->fl_type = F_RDLCK;
			else
				fl->fl_type = F_WRLCK;
			fl->fl_start = 0;
			fl->fl_end = OFFSET_MAX;
		}
	}

error:
	mutex_unlock(&vnode->vfs_inode.i_mutex);
	_leave(" = %d [%hd]", ret, fl->fl_type);
	return ret;
}

/*
 * manage POSIX locks on a file
 */
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);

	_enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags,
	       (long long) fl->fl_start, (long long) fl->fl_end);

	/* AFS doesn't support mandatory locks */
	if (__mandatory_lock(&vnode->vfs_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (IS_GETLK(cmd))
		return afs_do_getlk(file, fl);
	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}

/*
 * manage FLOCK locks on a file
 */
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct afs_vnode *vnode = AFS_FS_I(file->f_dentry->d_inode);

	_enter("{%x:%u},%d,{t=%x,fl=%x}",
	       vnode->fid.vid, vnode->fid.vnode, cmd,
	       fl->fl_type, fl->fl_flags);

	/*
	 * No BSD flocks over AFS allowed.
	 * Note: we could try to fake a POSIX lock request here by
	 * using ((u32) filp | 0x80000000) or some such as the pid.
	 * Not sure whether that would be unique, though, or whether
	 * that would break in other places.
	 */
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/* we're simulating flock() locks using posix locks on the server */
	fl->fl_owner = (fl_owner_t) file;
	fl->fl_start = 0;
	fl->fl_end = OFFSET_MAX;

	if (fl->fl_type == F_UNLCK)
		return afs_do_unlk(file, fl);
	return afs_do_setlk(file, fl);
}

/*
 * the POSIX lock management core VFS code copies the lock record and adds the
 * copy into its own list, so we need to add that copy to the vnode's lock
 * queue in the same place as the original (which will be deleted shortly
 * after)
 */
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
	_enter("");

	list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
}

/*
 * need to remove this lock from the vnode queue when it's removed from the
 * VFS's list
 */
static void afs_fl_release_private(struct file_lock *fl)
{
	_enter("");

	list_del_init(&fl->fl_u.afs.link);
}
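
/*
 * Usage note (illustrative sketch, not part of the original file): afs_lock()
 * and afs_flock() are meant to be wired into the filesystem's file_operations
 * table, which is defined in another source file.  Assuming the conventional
 * VFS hook names, and using afs_file_operations only as an example name, the
 * wiring would look roughly like this:
 *
 *	const struct file_operations afs_file_operations = {
 *		...
 *		.lock	= afs_lock,
 *		.flock	= afs_flock,
 *	};
 *
 * ->lock receives POSIX (fcntl) lock requests and ->flock receives BSD
 * flock() requests; both funnel into afs_do_setlk()/afs_do_unlk() above.
 */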