/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmglue.c
 *
 * Code which implements an OCFS2-specific interface to our DLM.
 *
 * Copyright (C) 2003, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/quotaops.h>

#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>

#include "ocfs2.h"
#include "ocfs2_lockingver.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "stackglue.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "quota.h"

#include "buffer_head_io.h"

struct ocfs2_mask_waiter {
	struct list_head	mw_item;
	int			mw_status;
	struct completion	mw_complete;
	unsigned long		mw_mask;
	unsigned long		mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
	unsigned long long	mw_lock_start;
#endif
};

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);

/*
 * Return value from ->downconvert_worker functions.
 *
 * These control the precise actions of ocfs2_unblock_lock()
 * and ocfs2_process_blocked_lock()
 */
enum ocfs2_unblock_action {
	UNBLOCK_CONTINUE	= 0, /* Continue downconvert */
	UNBLOCK_CONTINUE_POST	= 1, /* Continue downconvert, fire
				      * ->post_unlock callback */
	UNBLOCK_STOP_POST	= 2, /* Do not downconvert, fire
				      * ->post_unlock() callback */
};
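/*
 * Example: a ->downconvert_worker() that has flushed everything it
 * needs to and wants ->post_unlock() to run afterwards returns
 * UNBLOCK_CONTINUE_POST; UNBLOCK_STOP_POST skips the downconvert but
 * still fires the callback.
 */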
struct ocfs2_unblock_ctl {
	int requeue;
	enum ocfs2_unblock_action unblock_action;
};

static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
					int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);

static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
				     int blocking);

static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
				       int blocking);

static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
				     struct ocfs2_lock_res *lockres);

static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);

#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)

/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
				     const char *function,
				     unsigned int line,
				     struct ocfs2_lock_res *lockres)
{
	struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);

	mlog(level, "LVB information for %s (called from %s:%u):\n",
	     lockres->l_name, function, line);
	mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
	     lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
	     be32_to_cpu(lvb->lvb_igeneration));
	mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
	     (unsigned long long)be64_to_cpu(lvb->lvb_isize),
	     be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
	     be16_to_cpu(lvb->lvb_imode));
	mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
	     "mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
	     (long long)be64_to_cpu(lvb->lvb_iatime_packed),
	     (long long)be64_to_cpu(lvb->lvb_ictime_packed),
	     (long long)be64_to_cpu(lvb->lvb_imtime_packed),
	     be32_to_cpu(lvb->lvb_iattr));
}


/*
 * OCFS2 Lock Resource Operations
 *
 * These fine tune the behavior of the generic dlmglue locking infrastructure.
 *
 * The most basic of lock types can point ->l_priv to their respective
 * struct ocfs2_super and allow the default actions to manage things.
 *
 * Right now, each lock type also needs to implement an init function,
 * and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
 * should be called when the lock is no longer needed (i.e., object
 * destruction time).
 */
struct ocfs2_lock_res_ops {
	/*
	 * Translate an ocfs2_lock_res * into an ocfs2_super *. Define
	 * this callback if ->l_priv is not an ocfs2_super pointer.
	 */
	struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);

	/*
	 * Optionally called in the downconvert thread after a
	 * successful downconvert. The lockres will not be referenced
	 * after this callback is called, so it is safe to free
	 * memory, etc.
	 *
	 * The exact semantics of when this is called are controlled
	 * by ->downconvert_worker().
	 */
	void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);

	/*
	 * Allow a lock type to add checks to determine whether it is
	 * safe to downconvert a lock. Return 0 to re-queue the
	 * downconvert at a later time, nonzero to continue.
	 *
	 * For most locks, the default checks that there are no
	 * incompatible holders are sufficient.
	 *
	 * Called with the lockres spinlock held.
	 */
	int (*check_downconvert)(struct ocfs2_lock_res *, int);
	/*
	 * Allows a lock type to populate the lock value block. This
	 * is called on downconvert, and when we drop a lock.
	 *
	 * Locks that want to use this should set LOCK_TYPE_USES_LVB
	 * in the flags field.
	 *
	 * Called with the lockres spinlock held.
	 */
	void (*set_lvb)(struct ocfs2_lock_res *);

	/*
	 * Called from the downconvert thread when it is determined
	 * that a lock will be downconverted. This is called without
	 * any locks held so the function can do work that might
	 * schedule (syncing out data, etc).
	 *
	 * This should return any one of the ocfs2_unblock_action
	 * values, depending on what it wants the thread to do.
	 */
	int (*downconvert_worker)(struct ocfs2_lock_res *, int);

	/*
	 * LOCK_TYPE_* flags which describe the specific requirements
	 * of a lock type. Descriptions of each individual flag follow.
	 */
	int flags;
};

/*
 * Some locks want to "refresh" potentially stale data when a
 * meaningful (PRMODE or EXMODE) lock level is first obtained. If this
 * flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
 * individual lockres l_flags member from the ast function. It is
 * expected that the locking wrapper will clear the
 * OCFS2_LOCK_NEEDS_REFRESH flag when done.
 */
#define LOCK_TYPE_REQUIRES_REFRESH 0x1

/*
 * Indicate that a lock type makes use of the lock value block. The
 * ->set_lvb lock type callback must be defined.
 */
#define LOCK_TYPE_USES_LVB 0x2

static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.check_downconvert = ocfs2_check_meta_downconvert,
	.set_lvb	= ocfs2_set_meta_lvb,
	.downconvert_worker = ocfs2_data_convert_worker,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};

static struct ocfs2_lock_res_ops ocfs2_super_lops = {
	.flags		= LOCK_TYPE_REQUIRES_REFRESH,
};

static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
	.get_osb	= ocfs2_get_dentry_osb,
	.post_unlock	= ocfs2_dentry_post_unlock,
	.downconvert_worker = ocfs2_dentry_convert_worker,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
	.get_osb	= ocfs2_get_inode_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
	.get_osb	= ocfs2_get_file_osb,
	.flags		= 0,
};

static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
	.set_lvb	= ocfs2_set_qinfo_lvb,
	.get_osb	= ocfs2_get_qinfo_osb,
	.flags		= LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
};

static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
	return lockres->l_type == OCFS2_LOCK_TYPE_META ||
		lockres->l_type == OCFS2_LOCK_TYPE_RW ||
		lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}

static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
	BUG_ON(!ocfs2_is_inode_lock(lockres));

	return (struct inode *) lockres->l_priv;
}

static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);

	return (struct ocfs2_dentry_lock *)lockres->l_priv;
}
static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
{
	BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);

	return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
}

static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
	if (lockres->l_ops->get_osb)
		return lockres->l_ops->get_osb(lockres);

	return (struct ocfs2_super *)lockres->l_priv;
}

static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted);
static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int level);
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert);
#define ocfs2_log_dlm_error(_func, _err, _lockres) do {			\
	if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY)		\
		mlog(ML_ERROR, "DLM error %d while calling %s on "	\
		     "resource %s\n", _err, _func, (_lockres)->l_name);	\
	else								\
		mlog(ML_ERROR, "DLM error %d while calling %s on "	\
		     "resource %.*s%08x\n", _err, _func,		\
		     OCFS2_DENTRY_LOCK_INO_START - 1,			\
		     (_lockres)->l_name,				\
		     (unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
} while (0)
static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static int ocfs2_inode_lock_update(struct inode *inode,
				   struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
					      int new_level);
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
				  struct ocfs2_lock_res *lockres,
				  int new_level,
				  int lvb,
				  unsigned int generation);
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
					struct ocfs2_lock_res *lockres);
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
				struct ocfs2_lock_res *lockres);


static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
				  u64 blkno,
				  u32 generation,
				  char *name)
{
	int len;

	mlog_entry_void();

	BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);

	len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
		       ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
		       (long long)blkno, generation);

	BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));

	mlog(0, "built lock resource with name: %s\n", name);

	mlog_exit_void();
}
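/*
 * Example with hypothetical values: a meta lock (type char 'M') on
 * block 0x1234 with generation 0x5678 yields
 * "M" OCFS2_LOCK_ID_PAD "0000000000001234" "00005678" concatenated -
 * always OCFS2_LOCK_ID_MAX_LEN - 1 characters plus the terminating
 * NUL, which is what the BUG_ON above asserts.
 */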
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);

static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
				       struct ocfs2_dlm_debug *dlm_debug)
{
	mlog(0, "Add tracking for lockres %s\n", res->l_name);

	spin_lock(&ocfs2_dlm_tracking_lock);
	list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
	spin_lock(&ocfs2_dlm_tracking_lock);
	if (!list_empty(&res->l_debug_list))
		list_del_init(&res->l_debug_list);
	spin_unlock(&ocfs2_dlm_tracking_lock);
}

#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
	res->l_lock_num_prmode = 0;
	res->l_lock_num_prmode_failed = 0;
	res->l_lock_total_prmode = 0;
	res->l_lock_max_prmode = 0;
	res->l_lock_num_exmode = 0;
	res->l_lock_num_exmode_failed = 0;
	res->l_lock_total_exmode = 0;
	res->l_lock_max_exmode = 0;
	res->l_lock_refresh = 0;
}

static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
				    struct ocfs2_mask_waiter *mw, int ret)
{
	unsigned long long *num, *sum;
	unsigned int *max, *failed;
	struct timespec ts = current_kernel_time();
	unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start;

	if (level == LKM_PRMODE) {
		num = &res->l_lock_num_prmode;
		sum = &res->l_lock_total_prmode;
		max = &res->l_lock_max_prmode;
		failed = &res->l_lock_num_prmode_failed;
	} else if (level == LKM_EXMODE) {
		num = &res->l_lock_num_exmode;
		sum = &res->l_lock_total_exmode;
		max = &res->l_lock_max_exmode;
		failed = &res->l_lock_num_exmode_failed;
	} else
		return;

	(*num)++;
	(*sum) += time;
	if (time > *max)
		*max = time;
	if (ret)
		(*failed)++;
}

static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
	lockres->l_lock_refresh++;
}

static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
	struct timespec ts = current_kernel_time();
	mw->mw_lock_start = timespec_to_ns(&ts);
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
}
static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
					   int level, struct ocfs2_mask_waiter *mw,
					   int ret)
{
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
#endif

static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
				       struct ocfs2_lock_res *res,
				       enum ocfs2_lock_type type,
				       struct ocfs2_lock_res_ops *ops,
				       void *priv)
{
	res->l_type = type;
	res->l_ops = ops;
	res->l_priv = priv;

	res->l_level = DLM_LOCK_IV;
	res->l_requested = DLM_LOCK_IV;
	res->l_blocking = DLM_LOCK_IV;
	res->l_action = OCFS2_AST_INVALID;
	res->l_unlock_action = OCFS2_UNLOCK_INVALID;

	res->l_flags = OCFS2_LOCK_INITIALIZED;

	ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);

	ocfs2_init_lock_stats(res);
}

void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
	/* This also clears out the lock status block */
	memset(res, 0, sizeof(struct ocfs2_lock_res));
	spin_lock_init(&res->l_lock);
	init_waitqueue_head(&res->l_event);
	INIT_LIST_HEAD(&res->l_blocked_list);
	INIT_LIST_HEAD(&res->l_mask_waiters);
}

void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
			       enum ocfs2_lock_type type,
			       unsigned int generation,
			       struct inode *inode)
{
	struct ocfs2_lock_res_ops *ops;

	switch(type) {
	case OCFS2_LOCK_TYPE_RW:
		ops = &ocfs2_inode_rw_lops;
		break;
	case OCFS2_LOCK_TYPE_META:
		ops = &ocfs2_inode_inode_lops;
		break;
	case OCFS2_LOCK_TYPE_OPEN:
		ops = &ocfs2_inode_open_lops;
		break;
	default:
		mlog_bug_on_msg(1, "type: %d\n", type);
		ops = NULL; /* thanks, gcc */
		break;
	}

	ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
			      generation, res->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}

static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
	struct inode *inode = ocfs2_lock_res_inode(lockres);

	return OCFS2_SB(inode->i_sb);
}

static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_mem_dqinfo *info = lockres->l_priv;

	return OCFS2_SB(info->dqi_gi.dqi_sb);
}

static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_file_private *fp = lockres->l_priv;

	return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
}

static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
	__be64 inode_blkno_be;

	memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
	       sizeof(__be64));

	return be64_to_cpu(inode_blkno_be);
}

static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
	struct ocfs2_dentry_lock *dl = lockres->l_priv;

	return OCFS2_SB(dl->dl_inode->i_sb);
}

void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
				u64 parent, struct inode *inode)
{
	int len;
	u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
	__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
	struct ocfs2_lock_res *lockres = &dl->dl_lockres;

	ocfs2_lock_res_init_once(lockres);

	/*
	 * Unfortunately, the standard lock naming scheme won't work
	 * here because we have two 16 byte values to use. Instead,
	 * we'll stuff the inode number as a binary value. We still
	 * want error prints to show something without garbling the
	 * display, so drop a null byte in there before the inode
	 * number. A future version of OCFS2 will likely use all
	 * binary lock names. The stringified names have been a
	 * tremendous aid in debugging, but now that the debugfs
	 * interface exists, we can mangle things there if need be.
	 *
	 * NOTE: We also drop the standard "pad" value (the total lock
	 * name size stays the same though - the last part is all
	 * zeros due to the memset in ocfs2_lock_res_init_once()).
	 */
	len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
		       "%c%016llx",
		       ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
		       (long long)parent);

	BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));

	memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
	       sizeof(__be64));

	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
				   dl);
}
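/*
 * Resulting buffer layout from ocfs2_dentry_lock_res_init() above: the
 * dentry type character, 16 hex digits of the parent block number, the
 * NUL written by snprintf(), and then the raw big-endian inode block
 * number starting at OCFS2_DENTRY_LOCK_INO_START.
 */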
static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
				      struct ocfs2_super *osb)
{
	/* Superblock lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
			      0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
				   &ocfs2_super_lops, osb);
}

static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
				       struct ocfs2_super *osb)
{
	/* Rename lockres doesn't come from a slab so we call init
	 * once on it manually. */
	ocfs2_lock_res_init_once(res);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
	ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
				   &ocfs2_rename_lops, osb);
}

void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
			      struct ocfs2_file_private *fp)
{
	struct inode *inode = fp->fp_file->f_mapping->host;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
			      inode->i_generation, lockres->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
				   OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
				   fp);
	lockres->l_flags |= OCFS2_LOCK_NOCACHE;
}

void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
			       struct ocfs2_mem_dqinfo *info)
{
	ocfs2_lock_res_init_once(lockres);
	ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
			      0, lockres->l_name);
	ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
				   OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
				   info);
}

void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
	mlog_entry_void();

	if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
		return;

	ocfs2_remove_lockres_tracking(res);

	mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
			"Lockres %s is on the blocked list\n",
			res->l_name);
	mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
			"Lockres %s has mask waiters pending\n",
			res->l_name);
	mlog_bug_on_msg(spin_is_locked(&res->l_lock),
			"Lockres %s is locked\n",
			res->l_name);
	mlog_bug_on_msg(res->l_ro_holders,
			"Lockres %s has %u ro holders\n",
			res->l_name, res->l_ro_holders);
	mlog_bug_on_msg(res->l_ex_holders,
			"Lockres %s has %u ex holders\n",
			res->l_name, res->l_ex_holders);

	/* Need to clear out the lock status block for the dlm */
	memset(&res->l_lksb, 0, sizeof(res->l_lksb));

	res->l_flags = 0UL;
	mlog_exit_void();
}

static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	mlog_entry_void();

	BUG_ON(!lockres);

	switch(level) {
	case DLM_LOCK_EX:
		lockres->l_ex_holders++;
		break;
	case DLM_LOCK_PR:
		lockres->l_ro_holders++;
		break;
	default:
		BUG();
	}

	mlog_exit_void();
}

static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
				     int level)
{
	mlog_entry_void();

	BUG_ON(!lockres);

	switch(level) {
	case DLM_LOCK_EX:
		BUG_ON(!lockres->l_ex_holders);
		lockres->l_ex_holders--;
		break;
	case DLM_LOCK_PR:
		BUG_ON(!lockres->l_ro_holders);
		lockres->l_ro_holders--;
		break;
	default:
		BUG();
	}
	mlog_exit_void();
}

/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
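/*
 * Mapping: a holder at EX is only compatible with NL on other nodes, a
 * holder at PR is compatible with PR, and anything lower (NL) leaves
 * EX available.
 */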
static inline int ocfs2_highest_compat_lock_level(int level)
{
	int new_level = DLM_LOCK_EX;

	if (level == DLM_LOCK_EX)
		new_level = DLM_LOCK_NL;
	else if (level == DLM_LOCK_PR)
		new_level = DLM_LOCK_PR;
	return new_level;
}

static void lockres_set_flags(struct ocfs2_lock_res *lockres,
			      unsigned long newflags)
{
	struct ocfs2_mask_waiter *mw, *tmp;

	assert_spin_locked(&lockres->l_lock);

	lockres->l_flags = newflags;

	list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			continue;

		list_del_init(&mw->mw_item);
		mw->mw_status = 0;
		complete(&mw->mw_complete);
	}
}
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
	lockres_set_flags(lockres, lockres->l_flags | or);
}
static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
				unsigned long clear)
{
	lockres_set_flags(lockres, lockres->l_flags & ~clear);
}

static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
	mlog_entry_void();

	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
	BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);

	lockres->l_level = lockres->l_requested;
	if (lockres->l_level <=
	    ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
		lockres->l_blocking = DLM_LOCK_NL;
		lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
	}
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);

	mlog_exit_void();
}

static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
	mlog_entry_void();

	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));

	/* Convert from RO to EX doesn't really need anything as our
	 * information is already up to date. Convert from NL to
	 * *anything* however should mark ourselves as needing an
	 * update */
	if (lockres->l_level == DLM_LOCK_NL &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);

	mlog_exit_void();
}

static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
	mlog_entry_void();

	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);

	if (lockres->l_requested > DLM_LOCK_NL &&
	    !(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
	    lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
		lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);

	lockres->l_level = lockres->l_requested;
	lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);

	mlog_exit_void();
}
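/*
 * Example: on a lock with nothing blocking yet (compat level EX), a
 * BAST at either PR or EX makes ocfs2_generic_handle_bast() below
 * report needs_downconvert, since compat(PR) == PR and compat(EX) ==
 * NL are both lower than EX.
 */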
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
				     int level)
{
	int needs_downconvert = 0;
	mlog_entry_void();

	assert_spin_locked(&lockres->l_lock);

	lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);

	if (level > lockres->l_blocking) {
		/* only schedule a downconvert if we haven't already scheduled
		 * one that goes low enough to satisfy the level we're
		 * blocking. This also catches the case where we get
		 * duplicate BASTs */
		if (ocfs2_highest_compat_lock_level(level) <
		    ocfs2_highest_compat_lock_level(lockres->l_blocking))
			needs_downconvert = 1;

		lockres->l_blocking = level;
	}

	mlog_exit(needs_downconvert);
	return needs_downconvert;
}

/*
 * OCFS2_LOCK_PENDING and l_pending_gen.
 *
 * Why does OCFS2_LOCK_PENDING exist? To close a race between setting
 * OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
 * for more details on the race.
 *
 * OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
 * a race of its own. In o2dlm, we can get the ast before ocfs2_dlm_lock()
 * returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
 * OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
 * the caller is going to try to clear PENDING again. If nothing else is
 * happening, __lockres_clear_pending() sees PENDING is unset and does
 * nothing.
 *
 * But what if another path (eg downconvert thread) has just started a
 * new locking action? The other path has re-set PENDING. Our path
 * cannot clear PENDING, because that will re-open the original race
 * window.
 *
 * [Example]
 *
 * ocfs2_meta_lock()
 *  ocfs2_cluster_lock()
 *   set BUSY
 *   set PENDING
 *   drop l_lock
 *   ocfs2_dlm_lock()
 *    ocfs2_locking_ast()               ocfs2_downconvert_thread()
 *     clear PENDING                     ocfs2_unblock_lock()
 *                                        take_l_lock
 *                                        !BUSY
 *                                        ocfs2_prepare_downconvert()
 *                                         set BUSY
 *                                         set PENDING
 *                                        drop l_lock
 *   take l_lock
 *   clear PENDING
 *   drop l_lock
 *                     <window>
 *                                        ocfs2_dlm_lock()
 *
 * So as you can see, we now have a window where l_lock is not held,
 * PENDING is not set, and ocfs2_dlm_lock() has not been called.
 *
 * The core problem is that ocfs2_cluster_lock() has cleared the PENDING
 * set by ocfs2_prepare_downconvert(). That wasn't nice.
 *
 * To solve this we introduce l_pending_gen. A call to
 * lockres_clear_pending() will only do so when it is passed a generation
 * number that matches the lockres. lockres_set_pending() will return the
 * current generation number. When ocfs2_cluster_lock() goes to clear
 * PENDING, it passes the generation it got from set_pending(). In our
 * example above, the generation numbers will *not* match. Thus,
 * ocfs2_cluster_lock() will not clear the PENDING set by
 * ocfs2_prepare_downconvert().
 */

/* Unlocked version for ocfs2_locking_ast() */
static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
				    unsigned int generation,
				    struct ocfs2_super *osb)
{
	assert_spin_locked(&lockres->l_lock);

	/*
	 * The ast and locking functions can race us here. The winner
	 * will clear pending, the loser will not.
	 */
	if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
	    (lockres->l_pending_gen != generation))
		return;

	lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
	lockres->l_pending_gen++;

	/*
	 * The downconvert thread may have skipped us because we
	 * were PENDING. Wake it up.
	 */
	if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
		ocfs2_wake_downconvert_thread(osb);
}
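/*
 * Typical caller pattern (see ocfs2_lock_create() below):
 *
 *	gen = lockres_set_pending(lockres);	(under l_lock)
 *	spin_unlock_irqrestore(&lockres->l_lock, flags);
 *	ret = ocfs2_dlm_lock(...);
 *	lockres_clear_pending(lockres, gen, osb);
 */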
/* Locked version for callers of ocfs2_dlm_lock() */
static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
				  unsigned int generation,
				  struct ocfs2_super *osb)
{
	unsigned long flags;

	spin_lock_irqsave(&lockres->l_lock, flags);
	__lockres_clear_pending(lockres, generation, osb);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
{
	assert_spin_locked(&lockres->l_lock);
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));

	lockres_or_flags(lockres, OCFS2_LOCK_PENDING);

	return lockres->l_pending_gen;
}


static void ocfs2_blocking_ast(void *opaque, int level)
{
	struct ocfs2_lock_res *lockres = opaque;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	int needs_downconvert;
	unsigned long flags;

	BUG_ON(level <= DLM_LOCK_NL);

	mlog(0, "BAST fired for lockres %s, blocking %d, level %d type %s\n",
	     lockres->l_name, level, lockres->l_level,
	     ocfs2_lock_type_string(lockres->l_type));

	/*
	 * We can skip the bast for locks which don't enable caching -
	 * they'll be dropped at the earliest possible time anyway.
	 */
	if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
		return;

	spin_lock_irqsave(&lockres->l_lock, flags);
	needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
	if (needs_downconvert)
		ocfs2_schedule_blocked_lock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);

	ocfs2_wake_downconvert_thread(osb);
}
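/*
 * AST fired by the DLM once a lock or convert request completes.
 * Updates l_level and the lockres flags according to l_action, then
 * wakes anyone waiting on l_event.
 */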
static void ocfs2_locking_ast(void *opaque)
{
	struct ocfs2_lock_res *lockres = opaque;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	unsigned long flags;
	int status;

	spin_lock_irqsave(&lockres->l_lock, flags);

	status = ocfs2_dlm_lock_status(&lockres->l_lksb);

	if (status == -EAGAIN) {
		lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
		goto out;
	}

	if (status) {
		mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
		     lockres->l_name, status);
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		return;
	}

	switch(lockres->l_action) {
	case OCFS2_AST_ATTACH:
		ocfs2_generic_handle_attach_action(lockres);
		lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
		break;
	case OCFS2_AST_CONVERT:
		ocfs2_generic_handle_convert_action(lockres);
		break;
	case OCFS2_AST_DOWNCONVERT:
		ocfs2_generic_handle_downconvert_action(lockres);
		break;
	default:
		mlog(ML_ERROR, "lockres %s: ast fired with invalid action: %u "
		     "lockres flags = 0x%lx, unlock action: %u\n",
		     lockres->l_name, lockres->l_action, lockres->l_flags,
		     lockres->l_unlock_action);
		BUG();
	}
out:
	/* set it to something invalid so if we get called again we
	 * can catch it. */
	lockres->l_action = OCFS2_AST_INVALID;

	/* Did we try to cancel this lock? Clear that state */
	if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;

	/*
	 * We may have beaten the locking functions here. We certainly
	 * know that dlm_lock() has been called :-)
	 * Because we can't have two lock calls in flight at once, we
	 * can use lockres->l_pending_gen.
	 */
	__lockres_clear_pending(lockres, lockres->l_pending_gen, osb);

	wake_up(&lockres->l_event);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
}

static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
						int convert)
{
	unsigned long flags;

	mlog_entry_void();
	spin_lock_irqsave(&lockres->l_lock, flags);
	lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
	if (convert)
		lockres->l_action = OCFS2_AST_INVALID;
	else
		lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	wake_up(&lockres->l_event);
	mlog_exit_void();
}

/* Note: If we detect another process working on the lock (i.e.,
 * OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
 * to do the right thing in that case.
 */
static int ocfs2_lock_create(struct ocfs2_super *osb,
			     struct ocfs2_lock_res *lockres,
			     int level,
			     u32 dlm_flags)
{
	int ret = 0;
	unsigned long flags;
	unsigned int gen;

	mlog_entry_void();

	mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
	     dlm_flags);

	spin_lock_irqsave(&lockres->l_lock, flags);
	if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
	    (lockres->l_flags & OCFS2_LOCK_BUSY)) {
		spin_unlock_irqrestore(&lockres->l_lock, flags);
		goto bail;
	}

	lockres->l_action = OCFS2_AST_ATTACH;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
	gen = lockres_set_pending(lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn,
			     level,
			     &lockres->l_lksb,
			     dlm_flags,
			     lockres->l_name,
			     OCFS2_LOCK_ID_MAX_LEN - 1,
			     lockres);
	lockres_clear_pending(lockres, gen, osb);
	if (ret) {
		ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
		ocfs2_recover_from_dlm_error(lockres, 1);
	}

	mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);

bail:
	mlog_exit(ret);
	return ret;
}

static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
					int flag)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&lockres->l_lock, flags);
	ret = lockres->l_flags & flag;
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}

static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}
/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
						     int wanted)
{
	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));

	return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}

static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
	INIT_LIST_HEAD(&mw->mw_item);
	init_completion(&mw->mw_complete);
	ocfs2_init_start_time(mw);
}

static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
	wait_for_completion(&mw->mw_complete);
	/* Re-arm the completion in case we want to wait on it again */
	INIT_COMPLETION(mw->mw_complete);
	return mw->mw_status;
}

static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
				    struct ocfs2_mask_waiter *mw,
				    unsigned long mask,
				    unsigned long goal)
{
	BUG_ON(!list_empty(&mw->mw_item));

	assert_spin_locked(&lockres->l_lock);

	list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
	mw->mw_mask = mask;
	mw->mw_goal = goal;
}

/* returns 0 if the mw that was removed was already satisfied, -EBUSY
 * if the mask still hadn't reached its goal */
static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
				      struct ocfs2_mask_waiter *mw)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!list_empty(&mw->mw_item)) {
		if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
			ret = -EBUSY;

		list_del_init(&mw->mw_item);
		init_completion(&mw->mw_complete);
	}
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ret;
}

static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
					     struct ocfs2_lock_res *lockres)
{
	int ret;

	ret = wait_for_completion_interruptible(&mw->mw_complete);
	if (ret)
		lockres_remove_mask_waiter(lockres, mw);
	else
		ret = mw->mw_status;
	/* Re-arm the completion in case we want to wait on it again */
	INIT_COMPLETION(mw->mw_complete);
	return ret;
}
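/*
 * Core cluster lock acquisition: upconverts the DLM lock backing
 * @lockres to @level, looping until neither OCFS2_LOCK_BUSY nor an
 * incompatible OCFS2_LOCK_BLOCKED state stands in the way. With
 * OCFS2_LOCK_NONBLOCK in @arg_flags the wait is skipped and -EAGAIN
 * returned instead.
 */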
static int ocfs2_cluster_lock(struct ocfs2_super *osb,
			      struct ocfs2_lock_res *lockres,
			      int level,
			      u32 lkm_flags,
			      int arg_flags)
{
	struct ocfs2_mask_waiter mw;
	int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
	int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
	unsigned long flags;
	unsigned int gen;
	int noqueue_attempted = 0;

	mlog_entry_void();

	ocfs2_init_mask_waiter(&mw);

	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
		lkm_flags |= DLM_LKF_VALBLK;

again:
	wait = 0;

	if (catch_signals && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);

	mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
			"Cluster lock called on freeing lockres %s! flags "
			"0x%lx\n", lockres->l_name, lockres->l_flags);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if (lockres->l_flags & OCFS2_LOCK_BUSY &&
	    level > lockres->l_level) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		wait = 1;
		goto unlock;
	}

	if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
	    !ocfs2_may_continue_on_blocked_lock(lockres, level)) {
		/* is the lock currently blocked on behalf of
		 * another node? */
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
		wait = 1;
		goto unlock;
	}

	if (level > lockres->l_level) {
		if (noqueue_attempted > 0) {
			ret = -EAGAIN;
			goto unlock;
		}
		if (lkm_flags & DLM_LKF_NOQUEUE)
			noqueue_attempted = 1;

		if (lockres->l_action != OCFS2_AST_INVALID)
			mlog(ML_ERROR, "lockres %s has action %u pending\n",
			     lockres->l_name, lockres->l_action);

		if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
			lockres->l_action = OCFS2_AST_ATTACH;
			lkm_flags &= ~DLM_LKF_CONVERT;
		} else {
			lockres->l_action = OCFS2_AST_CONVERT;
			lkm_flags |= DLM_LKF_CONVERT;
		}

		lockres->l_requested = level;
		lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
		gen = lockres_set_pending(lockres);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		BUG_ON(level == DLM_LOCK_IV);
		BUG_ON(level == DLM_LOCK_NL);

		mlog(0, "lock %s, convert from %d to level = %d\n",
		     lockres->l_name, lockres->l_level, level);

		/* call dlm_lock to upgrade lock now */
		ret = ocfs2_dlm_lock(osb->cconn,
				     level,
				     &lockres->l_lksb,
				     lkm_flags,
				     lockres->l_name,
				     OCFS2_LOCK_ID_MAX_LEN - 1,
				     lockres);
		lockres_clear_pending(lockres, gen, osb);
		if (ret) {
			if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
			    (ret != -EAGAIN)) {
				ocfs2_log_dlm_error("ocfs2_dlm_lock",
						    ret, lockres);
			}
			ocfs2_recover_from_dlm_error(lockres, 1);
			goto out;
		}

		mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
		     lockres->l_name);

		/* At this point we've gone inside the dlm and need to
		 * complete our work regardless. */
		catch_signals = 0;

		/* wait for busy to clear and carry on */
		goto again;
	}

	/* Ok, if we get here then we're good to go. */
	ocfs2_inc_holders(lockres, level);

	ret = 0;
unlock:
	spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
	/*
	 * This is helping work around a lock inversion between the page lock
	 * and dlm locks. One path holds the page lock while calling aops
	 * which block acquiring dlm locks. The downconvert thread holds dlm
	 * locks while acquiring page locks while downconverting data locks.
	 * This block is helping an aop path notice the inversion and back
	 * off to unlock its page lock before trying the dlm lock again.
	 */
	if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
	    mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
		wait = 0;
		if (lockres_remove_mask_waiter(lockres, &mw))
			ret = -EAGAIN;
		else
			goto again;
	}
	if (wait) {
		ret = ocfs2_wait_for_mask(&mw);
		if (ret == 0)
			goto again;
		mlog_errno(ret);
	}
	ocfs2_update_lock_stats(lockres, level, &mw, ret);

	mlog_exit(ret);
	return ret;
}

static void ocfs2_cluster_unlock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int level)
{
	unsigned long flags;

	mlog_entry_void();
	spin_lock_irqsave(&lockres->l_lock, flags);
	ocfs2_dec_holders(lockres, level);
	ocfs2_downconvert_on_unlock(osb, lockres);
	spin_unlock_irqrestore(&lockres->l_lock, flags);
	mlog_exit_void();
}

static int ocfs2_create_new_lock(struct ocfs2_super *osb,
				 struct ocfs2_lock_res *lockres,
				 int ex,
				 int local)
{
	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	unsigned long flags;
	u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;

	spin_lock_irqsave(&lockres->l_lock, flags);
	BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
	lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}

/* Grants us an EX lock on the data and metadata resources, skipping
 * the normal cluster directory lookup. Use this ONLY on newly created
 * inodes which other nodes can't possibly see, and which haven't been
 * hashed in the inode hash yet. This can give us a good performance
 * increase as it'll skip the network broadcast normally associated
 * with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);
	BUG_ON(!ocfs2_inode_is_new(inode));

	mlog_entry_void();

	mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);

	/* Note that we don't increment any of the holder counts, nor
	 * do we add anything to a journal handle. Since this is
	 * supposed to be a new inode which the cluster doesn't know
	 * about yet, there is no need to. As far as the LVB handling
	 * is concerned, this is basically like acquiring an EX lock
	 * on a resource which has an invalid one -- we'll set it
	 * valid when we release the EX. */

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	/*
	 * We don't want to use DLM_LKF_LOCAL on a metadata lock as they
	 * don't use a generation in their lock names.
	 */
	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

	ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
	if (ret) {
		mlog_errno(ret);
		goto bail;
	}

bail:
	mlog_exit(ret);
	return ret;
}

int ocfs2_rw_lock(struct inode *inode, int write)
{
	int status, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);

	mlog_entry_void();

	mlog(0, "inode %llu take %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		return 0;

	lockres = &OCFS2_I(inode)->ip_rw_lockres;

	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
				    0);
	if (status < 0)
		mlog_errno(status);

	mlog_exit(status);
	return status;
}

void ocfs2_rw_unlock(struct inode *inode, int write)
{
	int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry_void();

	mlog(0, "inode %llu drop %s RW lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (!ocfs2_mount_local(osb))
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);

	mlog_exit_void();
}

/*
 * ocfs2_open_lock always gets a PR mode lock.
 */
int ocfs2_open_lock(struct inode *inode)
{
	int status = 0;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);

	mlog_entry_void();

	mlog(0, "inode %llu take PRMODE open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
				    DLM_LOCK_PR, 0, 0);
	if (status < 0)
		mlog_errno(status);

out:
	mlog_exit(status);
	return status;
}

int ocfs2_try_open_lock(struct inode *inode, int write)
{
	int status = 0, level;
	struct ocfs2_lock_res *lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	BUG_ON(!inode);

	mlog_entry_void();

	mlog(0, "inode %llu try to take %s open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno,
	     write ? "EXMODE" : "PRMODE");

	if (ocfs2_mount_local(osb))
		goto out;

	lockres = &OCFS2_I(inode)->ip_open_lockres;

	level = write ? DLM_LOCK_EX : DLM_LOCK_PR;

	/*
	 * The file system may already be holding a PRMODE/EXMODE open
	 * lock. Since we pass DLM_LKF_NOQUEUE, the request won't block
	 * waiting on other nodes and the -EAGAIN will indicate to the
	 * caller that this inode is still in use.
	 */
	status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
				    level, DLM_LKF_NOQUEUE, 0);

out:
	mlog_exit(status);
	return status;
}
/*
 * ocfs2_open_unlock unlocks PR and EX mode open locks.
 */
void ocfs2_open_unlock(struct inode *inode)
{
	struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	mlog_entry_void();

	mlog(0, "inode %llu drop open lock\n",
	     (unsigned long long)OCFS2_I(inode)->ip_blkno);

	if (ocfs2_mount_local(osb))
		goto out;

	if (lockres->l_ro_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     DLM_LOCK_PR);
	if (lockres->l_ex_holders)
		ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
				     DLM_LOCK_EX);

out:
	mlog_exit_void();
}

static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
				     int level)
{
	int ret;
	struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
	unsigned long flags;
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

retry_cancel:
	spin_lock_irqsave(&lockres->l_lock, flags);
	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
		ret = ocfs2_prepare_cancel_convert(osb, lockres);
		if (ret) {
			spin_unlock_irqrestore(&lockres->l_lock, flags);
			ret = ocfs2_cancel_convert(osb, lockres);
			if (ret < 0) {
				mlog_errno(ret);
				goto out;
			}
			goto retry_cancel;
		}
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		ocfs2_wait_for_mask(&mw);
		goto retry_cancel;
	}

	ret = -ERESTARTSYS;
	/*
	 * We may still have gotten the lock, in which case there's no
	 * point to restarting the syscall.
	 */
	if (lockres->l_level == level)
		ret = 0;

	mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
	     lockres->l_flags, lockres->l_level, lockres->l_action);

	spin_unlock_irqrestore(&lockres->l_lock, flags);

out:
	return ret;
}

/*
 * ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
 * flock() calls. The locking approach this requires is sufficiently
 * different from all other cluster lock types that we implement a
 * separate path to the "low-level" dlm calls. In particular:
 *
 * - No optimization of lock levels is done - we take exactly
 *   what's been requested.
 *
 * - No lock caching is employed. We immediately downconvert to
 *   no-lock at unlock time. This also means flock locks never go on
 *   the blocking list.
 *
 * - Since userspace can trivially deadlock itself with flock, we make
 *   sure to allow cancellation of a misbehaving application's flock()
 *   request.
 *
 * - Access to any flock lockres doesn't require concurrency, so we
 *   can simplify the code by requiring the caller to guarantee
 *   serialization of dlmglue flock calls.
 */
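/*
 * For reference (assuming the ocfs2_flock() caller in locks.c): a
 * flock(fd, LOCK_EX | LOCK_NB) request reaches this function as
 * ocfs2_file_lock(file, 1, 1).
 */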
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{
	int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
	unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
	unsigned long flags;
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_lock_res *lockres = &fp->fp_flock;
	struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
	struct ocfs2_mask_waiter mw;

	ocfs2_init_mask_waiter(&mw);

	if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
	    (lockres->l_level > DLM_LOCK_NL)) {
		mlog(ML_ERROR,
		     "File lock \"%s\" has busy or locked state: flags: 0x%lx, "
		     "level: %u\n", lockres->l_name, lockres->l_flags,
		     lockres->l_level);
		return -EINVAL;
	}

	spin_lock_irqsave(&lockres->l_lock, flags);
	if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
		lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
		spin_unlock_irqrestore(&lockres->l_lock, flags);

		/*
		 * Get the lock at NLMODE to start - that way we
		 * can cancel the upconvert request if need be.
		 */
		ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_wait_for_mask(&mw);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		spin_lock_irqsave(&lockres->l_lock, flags);
	}

	lockres->l_action = OCFS2_AST_CONVERT;
	lkm_flags |= DLM_LKF_CONVERT;
	lockres->l_requested = level;
	lockres_or_flags(lockres, OCFS2_LOCK_BUSY);

	lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
	spin_unlock_irqrestore(&lockres->l_lock, flags);

	ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
			     lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1,
			     lockres);
	if (ret) {
		if (!trylock || (ret != -EAGAIN)) {
			ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
			ret = -EINVAL;
		}

		ocfs2_recover_from_dlm_error(lockres, 1);
		lockres_remove_mask_waiter(lockres, &mw);
		goto out;
	}

	ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
	if (ret == -ERESTARTSYS) {
		/*
		 * Userspace can cause deadlock itself with
		 * flock(). Current behavior locally is to allow the
		 * deadlock, but abort the system call if a signal is
		 * received. We follow this example, otherwise a
		 * poorly written program could sit in kernel until
		 * reboot.
		 *
		 * Handling this is a bit more complicated for Ocfs2
		 * though. We can't exit this function with an
		 * outstanding lock request, so a cancel convert is
		 * required. We intentionally overwrite 'ret' - if the
		 * cancel fails and the lock was granted, it's easier
		 * to just bubble success back up to the user.
		 */
1753 */ 1754 ret = ocfs2_flock_handle_signal(lockres, level); 1755 } else if (!ret && (level > lockres->l_level)) { 1756 /* Trylock failed asynchronously */ 1757 BUG_ON(!trylock); 1758 ret = -EAGAIN; 1759 } 1760 1761 out: 1762 1763 mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n", 1764 lockres->l_name, ex, trylock, ret); 1765 return ret; 1766 } 1767 1768 void ocfs2_file_unlock(struct file *file) 1769 { 1770 int ret; 1771 unsigned int gen; 1772 unsigned long flags; 1773 struct ocfs2_file_private *fp = file->private_data; 1774 struct ocfs2_lock_res *lockres = &fp->fp_flock; 1775 struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb); 1776 struct ocfs2_mask_waiter mw; 1777 1778 ocfs2_init_mask_waiter(&mw); 1779 1780 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) 1781 return; 1782 1783 if (lockres->l_level == DLM_LOCK_NL) 1784 return; 1785 1786 mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n", 1787 lockres->l_name, lockres->l_flags, lockres->l_level, 1788 lockres->l_action); 1789 1790 spin_lock_irqsave(&lockres->l_lock, flags); 1791 /* 1792 * Fake a blocking ast for the downconvert code. 1793 */ 1794 lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED); 1795 lockres->l_blocking = DLM_LOCK_EX; 1796 1797 gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL); 1798 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0); 1799 spin_unlock_irqrestore(&lockres->l_lock, flags); 1800 1801 ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen); 1802 if (ret) { 1803 mlog_errno(ret); 1804 return; 1805 } 1806 1807 ret = ocfs2_wait_for_mask(&mw); 1808 if (ret) 1809 mlog_errno(ret); 1810 } 1811 1812 static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb, 1813 struct ocfs2_lock_res *lockres) 1814 { 1815 int kick = 0; 1816 1817 mlog_entry_void(); 1818 1819 /* If we know that another node is waiting on our lock, kick 1820 * the downconvert thread * pre-emptively when we reach a release 1821 * condition. */ 1822 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) { 1823 switch(lockres->l_blocking) { 1824 case DLM_LOCK_EX: 1825 if (!lockres->l_ex_holders && !lockres->l_ro_holders) 1826 kick = 1; 1827 break; 1828 case DLM_LOCK_PR: 1829 if (!lockres->l_ex_holders) 1830 kick = 1; 1831 break; 1832 default: 1833 BUG(); 1834 } 1835 } 1836 1837 if (kick) 1838 ocfs2_wake_downconvert_thread(osb); 1839 1840 mlog_exit_void(); 1841 } 1842 1843 #define OCFS2_SEC_BITS 34 1844 #define OCFS2_SEC_SHIFT (64 - 34) 1845 #define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1) 1846 1847 /* LVB only has room for 64 bits of time here so we pack it for 1848 * now. */ 1849 static u64 ocfs2_pack_timespec(struct timespec *spec) 1850 { 1851 u64 res; 1852 u64 sec = spec->tv_sec; 1853 u32 nsec = spec->tv_nsec; 1854 1855 res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK); 1856 1857 return res; 1858 } 1859 1860 /* Call this with the lockres locked. I am reasonably sure we don't 1861 * need ip_lock in this function as anyone who would be changing those 1862 * values is supposed to be blocked in ocfs2_inode_lock right now. */ 1863 static void __ocfs2_stuff_meta_lvb(struct inode *inode) 1864 { 1865 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1866 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; 1867 struct ocfs2_meta_lvb *lvb; 1868 1869 mlog_entry_void(); 1870 1871 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 1872 1873 /* 1874 * Invalidate the LVB of a deleted inode - this way other 1875 * nodes are forced to go to disk and discover the new inode 1876 * status. 
1877 */ 1878 if (oi->ip_flags & OCFS2_INODE_DELETED) { 1879 lvb->lvb_version = 0; 1880 goto out; 1881 } 1882 1883 lvb->lvb_version = OCFS2_LVB_VERSION; 1884 lvb->lvb_isize = cpu_to_be64(i_size_read(inode)); 1885 lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters); 1886 lvb->lvb_iuid = cpu_to_be32(inode->i_uid); 1887 lvb->lvb_igid = cpu_to_be32(inode->i_gid); 1888 lvb->lvb_imode = cpu_to_be16(inode->i_mode); 1889 lvb->lvb_inlink = cpu_to_be16(inode->i_nlink); 1890 lvb->lvb_iatime_packed = 1891 cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime)); 1892 lvb->lvb_ictime_packed = 1893 cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime)); 1894 lvb->lvb_imtime_packed = 1895 cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime)); 1896 lvb->lvb_iattr = cpu_to_be32(oi->ip_attr); 1897 lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features); 1898 lvb->lvb_igeneration = cpu_to_be32(inode->i_generation); 1899 1900 out: 1901 mlog_meta_lvb(0, lockres); 1902 1903 mlog_exit_void(); 1904 } 1905 1906 static void ocfs2_unpack_timespec(struct timespec *spec, 1907 u64 packed_time) 1908 { 1909 spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT; 1910 spec->tv_nsec = packed_time & OCFS2_NSEC_MASK; 1911 } 1912 1913 static void ocfs2_refresh_inode_from_lvb(struct inode *inode) 1914 { 1915 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1916 struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres; 1917 struct ocfs2_meta_lvb *lvb; 1918 1919 mlog_entry_void(); 1920 1921 mlog_meta_lvb(0, lockres); 1922 1923 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 1924 1925 /* We're safe here without the lockres lock... */ 1926 spin_lock(&oi->ip_lock); 1927 oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters); 1928 i_size_write(inode, be64_to_cpu(lvb->lvb_isize)); 1929 1930 oi->ip_attr = be32_to_cpu(lvb->lvb_iattr); 1931 oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures); 1932 ocfs2_set_inode_flags(inode); 1933 1934 /* fast-symlinks are a special case */ 1935 if (S_ISLNK(inode->i_mode) && !oi->ip_clusters) 1936 inode->i_blocks = 0; 1937 else 1938 inode->i_blocks = ocfs2_inode_sector_count(inode); 1939 1940 inode->i_uid = be32_to_cpu(lvb->lvb_iuid); 1941 inode->i_gid = be32_to_cpu(lvb->lvb_igid); 1942 inode->i_mode = be16_to_cpu(lvb->lvb_imode); 1943 inode->i_nlink = be16_to_cpu(lvb->lvb_inlink); 1944 ocfs2_unpack_timespec(&inode->i_atime, 1945 be64_to_cpu(lvb->lvb_iatime_packed)); 1946 ocfs2_unpack_timespec(&inode->i_mtime, 1947 be64_to_cpu(lvb->lvb_imtime_packed)); 1948 ocfs2_unpack_timespec(&inode->i_ctime, 1949 be64_to_cpu(lvb->lvb_ictime_packed)); 1950 spin_unlock(&oi->ip_lock); 1951 1952 mlog_exit_void(); 1953 } 1954 1955 static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode, 1956 struct ocfs2_lock_res *lockres) 1957 { 1958 struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 1959 1960 if (lvb->lvb_version == OCFS2_LVB_VERSION 1961 && be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation) 1962 return 1; 1963 return 0; 1964 } 1965 1966 /* Determine whether a lock resource needs to be refreshed, and 1967 * arbitrate who gets to refresh it. 1968 * 1969 * 0 means no refresh needed. 1970 * 1971 * > 0 means you need to refresh this and you MUST call 1972 * ocfs2_complete_lock_res_refresh afterwards. 
 */
1973 static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
1974 {
1975 	unsigned long flags;
1976 	int status = 0;
1977
1978 	mlog_entry_void();
1979
1980 refresh_check:
1981 	spin_lock_irqsave(&lockres->l_lock, flags);
1982 	if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
1983 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1984 		goto bail;
1985 	}
1986
1987 	if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
1988 		spin_unlock_irqrestore(&lockres->l_lock, flags);
1989
1990 		ocfs2_wait_on_refreshing_lock(lockres);
1991 		goto refresh_check;
1992 	}
1993
1994 	/* Ok, I'll be the one to refresh this lock. */
1995 	lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
1996 	spin_unlock_irqrestore(&lockres->l_lock, flags);
1997
1998 	status = 1;
1999 bail:
2000 	mlog_exit(status);
2001 	return status;
2002 }
2003
2004 /* If status is nonzero, I'll mark it as not being in refresh
2005  * anymore, but I won't clear the needs refresh flag. */
2006 static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
2007 						   int status)
2008 {
2009 	unsigned long flags;
2010 	mlog_entry_void();
2011
2012 	spin_lock_irqsave(&lockres->l_lock, flags);
2013 	lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
2014 	if (!status)
2015 		lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
2016 	spin_unlock_irqrestore(&lockres->l_lock, flags);
2017
2018 	wake_up(&lockres->l_event);
2019
2020 	mlog_exit_void();
2021 }
2022
2023 /* may or may not return a bh if it went to disk. */
2024 static int ocfs2_inode_lock_update(struct inode *inode,
2025 				   struct buffer_head **bh)
2026 {
2027 	int status = 0;
2028 	struct ocfs2_inode_info *oi = OCFS2_I(inode);
2029 	struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
2030 	struct ocfs2_dinode *fe;
2031 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2032
2033 	mlog_entry_void();
2034
2035 	if (ocfs2_mount_local(osb))
2036 		goto bail;
2037
2038 	spin_lock(&oi->ip_lock);
2039 	if (oi->ip_flags & OCFS2_INODE_DELETED) {
2040 		mlog(0, "Orphaned inode %llu was deleted while we "
2041 		     "were waiting on a lock. ip_flags = 0x%x\n",
2042 		     (unsigned long long)oi->ip_blkno, oi->ip_flags);
2043 		spin_unlock(&oi->ip_lock);
2044 		status = -ENOENT;
2045 		goto bail;
2046 	}
2047 	spin_unlock(&oi->ip_lock);
2048
2049 	if (!ocfs2_should_refresh_lock_res(lockres))
2050 		goto bail;
2051
2052 	/* This will discard any caching information we might have had
2053 	 * for the inode metadata. */
2054 	ocfs2_metadata_cache_purge(inode);
2055
2056 	ocfs2_extent_map_trunc(inode, 0);
2057
2058 	if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
2059 		mlog(0, "Trusting LVB on inode %llu\n",
2060 		     (unsigned long long)oi->ip_blkno);
2061 		ocfs2_refresh_inode_from_lvb(inode);
2062 	} else {
2063 		/* Boo, we have to go to disk. */
2064 		/* read bh, cast, ocfs2_refresh_inode */
2065 		status = ocfs2_read_inode_block(inode, bh);
2066 		if (status < 0) {
2067 			mlog_errno(status);
2068 			goto bail_refresh;
2069 		}
2070 		fe = (struct ocfs2_dinode *) (*bh)->b_data;
2071
2072 		/* This is a good chance to make sure we're not
2073 		 * locking an invalid object. ocfs2_read_inode_block()
2074 		 * already checked that the inode block is sane.
2075 		 *
2076 		 * We bug on a stale inode here because we checked
2077 		 * above whether it was wiped from disk. The wiping
2078 		 * node provides a guarantee that we receive that
2079 		 * message and can mark the inode before dropping any
2080 		 * locks associated with it.
*/ 2081 mlog_bug_on_msg(inode->i_generation != 2082 le32_to_cpu(fe->i_generation), 2083 "Invalid dinode %llu disk generation: %u " 2084 "inode->i_generation: %u\n", 2085 (unsigned long long)oi->ip_blkno, 2086 le32_to_cpu(fe->i_generation), 2087 inode->i_generation); 2088 mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) || 2089 !(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)), 2090 "Stale dinode %llu dtime: %llu flags: 0x%x\n", 2091 (unsigned long long)oi->ip_blkno, 2092 (unsigned long long)le64_to_cpu(fe->i_dtime), 2093 le32_to_cpu(fe->i_flags)); 2094 2095 ocfs2_refresh_inode(inode, fe); 2096 ocfs2_track_lock_refresh(lockres); 2097 } 2098 2099 status = 0; 2100 bail_refresh: 2101 ocfs2_complete_lock_res_refresh(lockres, status); 2102 bail: 2103 mlog_exit(status); 2104 return status; 2105 } 2106 2107 static int ocfs2_assign_bh(struct inode *inode, 2108 struct buffer_head **ret_bh, 2109 struct buffer_head *passed_bh) 2110 { 2111 int status; 2112 2113 if (passed_bh) { 2114 /* Ok, the update went to disk for us, use the 2115 * returned bh. */ 2116 *ret_bh = passed_bh; 2117 get_bh(*ret_bh); 2118 2119 return 0; 2120 } 2121 2122 status = ocfs2_read_inode_block(inode, ret_bh); 2123 if (status < 0) 2124 mlog_errno(status); 2125 2126 return status; 2127 } 2128 2129 /* 2130 * returns < 0 error if the callback will never be called, otherwise 2131 * the result of the lock will be communicated via the callback. 2132 */ 2133 int ocfs2_inode_lock_full(struct inode *inode, 2134 struct buffer_head **ret_bh, 2135 int ex, 2136 int arg_flags) 2137 { 2138 int status, level, acquired; 2139 u32 dlm_flags; 2140 struct ocfs2_lock_res *lockres = NULL; 2141 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2142 struct buffer_head *local_bh = NULL; 2143 2144 BUG_ON(!inode); 2145 2146 mlog_entry_void(); 2147 2148 mlog(0, "inode %llu, take %s META lock\n", 2149 (unsigned long long)OCFS2_I(inode)->ip_blkno, 2150 ex ? "EXMODE" : "PRMODE"); 2151 2152 status = 0; 2153 acquired = 0; 2154 /* We'll allow faking a readonly metadata lock for 2155 * rodevices. */ 2156 if (ocfs2_is_hard_readonly(osb)) { 2157 if (ex) 2158 status = -EROFS; 2159 goto bail; 2160 } 2161 2162 if (ocfs2_mount_local(osb)) 2163 goto local; 2164 2165 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY)) 2166 ocfs2_wait_for_recovery(osb); 2167 2168 lockres = &OCFS2_I(inode)->ip_inode_lockres; 2169 level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 2170 dlm_flags = 0; 2171 if (arg_flags & OCFS2_META_LOCK_NOQUEUE) 2172 dlm_flags |= DLM_LKF_NOQUEUE; 2173 2174 status = ocfs2_cluster_lock(osb, lockres, level, dlm_flags, arg_flags); 2175 if (status < 0) { 2176 if (status != -EAGAIN && status != -EIOCBRETRY) 2177 mlog_errno(status); 2178 goto bail; 2179 } 2180 2181 /* Notify the error cleanup path to drop the cluster lock. */ 2182 acquired = 1; 2183 2184 /* We wait twice because a node may have died while we were in 2185 * the lower dlm layers. The second time though, we've 2186 * committed to owning this lock so we don't allow signals to 2187 * abort the operation. */ 2188 if (!(arg_flags & OCFS2_META_LOCK_RECOVERY)) 2189 ocfs2_wait_for_recovery(osb); 2190 2191 local: 2192 /* 2193 * We only see this flag if we're being called from 2194 * ocfs2_read_locked_inode(). It means we're locking an inode 2195 * which hasn't been populated yet, so clear the refresh flag 2196 * and let the caller handle it. 2197 */ 2198 if (inode->i_state & I_NEW) { 2199 status = 0; 2200 if (lockres) 2201 ocfs2_complete_lock_res_refresh(lockres, 0); 2202 goto bail; 2203 } 2204 2205 /* This is fun. 
The caller may want a bh back, or it may
2206 	 * not. ocfs2_inode_lock_update definitely wants one in, but
2207 	 * may or may not read one, depending on what's in the
2208 	 * LVB. The result of all of this is that we've *only* gone to
2209 	 * disk if we have to, so the complexity is worthwhile. */
2210 	status = ocfs2_inode_lock_update(inode, &local_bh);
2211 	if (status < 0) {
2212 		if (status != -ENOENT)
2213 			mlog_errno(status);
2214 		goto bail;
2215 	}
2216
2217 	if (ret_bh) {
2218 		status = ocfs2_assign_bh(inode, ret_bh, local_bh);
2219 		if (status < 0) {
2220 			mlog_errno(status);
2221 			goto bail;
2222 		}
2223 	}
2224
2225 bail:
2226 	if (status < 0) {
2227 		if (ret_bh && (*ret_bh)) {
2228 			brelse(*ret_bh);
2229 			*ret_bh = NULL;
2230 		}
2231 		if (acquired)
2232 			ocfs2_inode_unlock(inode, ex);
2233 	}
2234
2235 	if (local_bh)
2236 		brelse(local_bh);
2237
2238 	mlog_exit(status);
2239 	return status;
2240 }
2241
2242 /*
2243  * This is working around a lock inversion between tasks acquiring DLM
2244  * locks while holding a page lock and the downconvert thread which
2245  * blocks dlm lock acquisition while acquiring page locks.
2246  *
2247  * ** These _with_page variants are only intended to be called from aop
2248  * methods that hold page locks and return a very specific *positive* error
2249  * code that aop methods pass up to the VFS -- test for errors with != 0. **
2250  *
2251  * The DLM is called such that it returns -EAGAIN if it would have
2252  * blocked waiting for the downconvert thread. In that case we unlock
2253  * our page so the downconvert thread can make progress. Once we've
2254  * done this we have to return AOP_TRUNCATED_PAGE so the aop method
2255  * that called us can bubble that back up into the VFS, which will then
2256  * immediately retry the aop call.
2257  *
2258  * We do a blocking lock and immediate unlock before returning, though, so that
2259  * the lock has a great chance of being cached on this node by the time the VFS
2260  * calls back to retry the aop. This has a potential to livelock as nodes
2261  * ping locks back and forth, but that's a risk we're willing to take to
2262  * simply avoid the lock inversion.
2263  */
2264 int ocfs2_inode_lock_with_page(struct inode *inode,
2265 			       struct buffer_head **ret_bh,
2266 			       int ex,
2267 			       struct page *page)
2268 {
2269 	int ret;
2270
2271 	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
2272 	if (ret == -EAGAIN) {
2273 		unlock_page(page);
2274 		if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
2275 			ocfs2_inode_unlock(inode, ex);
2276 		ret = AOP_TRUNCATED_PAGE;
2277 	}
2278
2279 	return ret;
2280 }
2281
2282 int ocfs2_inode_lock_atime(struct inode *inode,
2283 			   struct vfsmount *vfsmnt,
2284 			   int *level)
2285 {
2286 	int ret;
2287
2288 	mlog_entry_void();
2289 	ret = ocfs2_inode_lock(inode, NULL, 0);
2290 	if (ret < 0) {
2291 		mlog_errno(ret);
2292 		return ret;
2293 	}
2294
2295 	/*
2296 	 * If we should update atime, we will get an EX lock,
2297 	 * otherwise we just get a PR lock.
2298 	 */
2299 	if (ocfs2_should_update_atime(inode, vfsmnt)) {
2300 		struct buffer_head *bh = NULL;
2301
2302 		ocfs2_inode_unlock(inode, 0);
2303 		ret = ocfs2_inode_lock(inode, &bh, 1);
2304 		if (ret < 0) {
2305 			mlog_errno(ret);
2306 			return ret;
2307 		}
2308 		*level = 1;
2309 		if (ocfs2_should_update_atime(inode, vfsmnt))
2310 			ocfs2_update_inode_atime(inode, bh);
2311 		if (bh)
2312 			brelse(bh);
2313 	} else
2314 		*level = 0;
2315
2316 	mlog_exit(ret);
2317 	return ret;
2318 }
2319
2320 void ocfs2_inode_unlock(struct inode *inode,
2321 			int ex)
2322 {
2323 	int level = ex ?
DLM_LOCK_EX : DLM_LOCK_PR; 2324 struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres; 2325 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); 2326 2327 mlog_entry_void(); 2328 2329 mlog(0, "inode %llu drop %s META lock\n", 2330 (unsigned long long)OCFS2_I(inode)->ip_blkno, 2331 ex ? "EXMODE" : "PRMODE"); 2332 2333 if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) && 2334 !ocfs2_mount_local(osb)) 2335 ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level); 2336 2337 mlog_exit_void(); 2338 } 2339 2340 int ocfs2_super_lock(struct ocfs2_super *osb, 2341 int ex) 2342 { 2343 int status = 0; 2344 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 2345 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; 2346 2347 mlog_entry_void(); 2348 2349 if (ocfs2_is_hard_readonly(osb)) 2350 return -EROFS; 2351 2352 if (ocfs2_mount_local(osb)) 2353 goto bail; 2354 2355 status = ocfs2_cluster_lock(osb, lockres, level, 0, 0); 2356 if (status < 0) { 2357 mlog_errno(status); 2358 goto bail; 2359 } 2360 2361 /* The super block lock path is really in the best position to 2362 * know when resources covered by the lock need to be 2363 * refreshed, so we do it here. Of course, making sense of 2364 * everything is up to the caller :) */ 2365 status = ocfs2_should_refresh_lock_res(lockres); 2366 if (status < 0) { 2367 mlog_errno(status); 2368 goto bail; 2369 } 2370 if (status) { 2371 status = ocfs2_refresh_slot_info(osb); 2372 2373 ocfs2_complete_lock_res_refresh(lockres, status); 2374 2375 if (status < 0) 2376 mlog_errno(status); 2377 ocfs2_track_lock_refresh(lockres); 2378 } 2379 bail: 2380 mlog_exit(status); 2381 return status; 2382 } 2383 2384 void ocfs2_super_unlock(struct ocfs2_super *osb, 2385 int ex) 2386 { 2387 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 2388 struct ocfs2_lock_res *lockres = &osb->osb_super_lockres; 2389 2390 if (!ocfs2_mount_local(osb)) 2391 ocfs2_cluster_unlock(osb, lockres, level); 2392 } 2393 2394 int ocfs2_rename_lock(struct ocfs2_super *osb) 2395 { 2396 int status; 2397 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres; 2398 2399 if (ocfs2_is_hard_readonly(osb)) 2400 return -EROFS; 2401 2402 if (ocfs2_mount_local(osb)) 2403 return 0; 2404 2405 status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0); 2406 if (status < 0) 2407 mlog_errno(status); 2408 2409 return status; 2410 } 2411 2412 void ocfs2_rename_unlock(struct ocfs2_super *osb) 2413 { 2414 struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres; 2415 2416 if (!ocfs2_mount_local(osb)) 2417 ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX); 2418 } 2419 2420 int ocfs2_dentry_lock(struct dentry *dentry, int ex) 2421 { 2422 int ret; 2423 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 2424 struct ocfs2_dentry_lock *dl = dentry->d_fsdata; 2425 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); 2426 2427 BUG_ON(!dl); 2428 2429 if (ocfs2_is_hard_readonly(osb)) 2430 return -EROFS; 2431 2432 if (ocfs2_mount_local(osb)) 2433 return 0; 2434 2435 ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0); 2436 if (ret < 0) 2437 mlog_errno(ret); 2438 2439 return ret; 2440 } 2441 2442 void ocfs2_dentry_unlock(struct dentry *dentry, int ex) 2443 { 2444 int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR; 2445 struct ocfs2_dentry_lock *dl = dentry->d_fsdata; 2446 struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); 2447 2448 if (!ocfs2_mount_local(osb)) 2449 ocfs2_cluster_unlock(osb, &dl->dl_lockres, level); 2450 } 2451 2452 /* Reference counting of the dlm debug structure. 
We want this because 2453 * open references on the debug inodes can live on after a mount, so 2454 * we can't rely on the ocfs2_super to always exist. */ 2455 static void ocfs2_dlm_debug_free(struct kref *kref) 2456 { 2457 struct ocfs2_dlm_debug *dlm_debug; 2458 2459 dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt); 2460 2461 kfree(dlm_debug); 2462 } 2463 2464 void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug) 2465 { 2466 if (dlm_debug) 2467 kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free); 2468 } 2469 2470 static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug) 2471 { 2472 kref_get(&debug->d_refcnt); 2473 } 2474 2475 struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void) 2476 { 2477 struct ocfs2_dlm_debug *dlm_debug; 2478 2479 dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL); 2480 if (!dlm_debug) { 2481 mlog_errno(-ENOMEM); 2482 goto out; 2483 } 2484 2485 kref_init(&dlm_debug->d_refcnt); 2486 INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking); 2487 dlm_debug->d_locking_state = NULL; 2488 out: 2489 return dlm_debug; 2490 } 2491 2492 /* Access to this is arbitrated for us via seq_file->sem. */ 2493 struct ocfs2_dlm_seq_priv { 2494 struct ocfs2_dlm_debug *p_dlm_debug; 2495 struct ocfs2_lock_res p_iter_res; 2496 struct ocfs2_lock_res p_tmp_res; 2497 }; 2498 2499 static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start, 2500 struct ocfs2_dlm_seq_priv *priv) 2501 { 2502 struct ocfs2_lock_res *iter, *ret = NULL; 2503 struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug; 2504 2505 assert_spin_locked(&ocfs2_dlm_tracking_lock); 2506 2507 list_for_each_entry(iter, &start->l_debug_list, l_debug_list) { 2508 /* discover the head of the list */ 2509 if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) { 2510 mlog(0, "End of list found, %p\n", ret); 2511 break; 2512 } 2513 2514 /* We track our "dummy" iteration lockres' by a NULL 2515 * l_ops field. */ 2516 if (iter->l_ops != NULL) { 2517 ret = iter; 2518 break; 2519 } 2520 } 2521 2522 return ret; 2523 } 2524 2525 static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos) 2526 { 2527 struct ocfs2_dlm_seq_priv *priv = m->private; 2528 struct ocfs2_lock_res *iter; 2529 2530 spin_lock(&ocfs2_dlm_tracking_lock); 2531 iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv); 2532 if (iter) { 2533 /* Since lockres' have the lifetime of their container 2534 * (which can be inodes, ocfs2_supers, etc) we want to 2535 * copy this out to a temporary lockres while still 2536 * under the spinlock. Obviously after this we can't 2537 * trust any pointers on the copy returned, but that's 2538 * ok as the information we want isn't typically held 2539 * in them. 
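		 *
		 * (ocfs2_dlm_seq_next() keeps the walk going the same
		 * way: it re-links the dummy iteration lockres just
		 * after the element it returns, so iteration resumes
		 * from there without ever holding a reference.)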
*/ 2540 priv->p_tmp_res = *iter; 2541 iter = &priv->p_tmp_res; 2542 } 2543 spin_unlock(&ocfs2_dlm_tracking_lock); 2544 2545 return iter; 2546 } 2547 2548 static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v) 2549 { 2550 } 2551 2552 static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos) 2553 { 2554 struct ocfs2_dlm_seq_priv *priv = m->private; 2555 struct ocfs2_lock_res *iter = v; 2556 struct ocfs2_lock_res *dummy = &priv->p_iter_res; 2557 2558 spin_lock(&ocfs2_dlm_tracking_lock); 2559 iter = ocfs2_dlm_next_res(iter, priv); 2560 list_del_init(&dummy->l_debug_list); 2561 if (iter) { 2562 list_add(&dummy->l_debug_list, &iter->l_debug_list); 2563 priv->p_tmp_res = *iter; 2564 iter = &priv->p_tmp_res; 2565 } 2566 spin_unlock(&ocfs2_dlm_tracking_lock); 2567 2568 return iter; 2569 } 2570 2571 /* So that debugfs.ocfs2 can determine which format is being used */ 2572 #define OCFS2_DLM_DEBUG_STR_VERSION 2 2573 static int ocfs2_dlm_seq_show(struct seq_file *m, void *v) 2574 { 2575 int i; 2576 char *lvb; 2577 struct ocfs2_lock_res *lockres = v; 2578 2579 if (!lockres) 2580 return -EINVAL; 2581 2582 seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION); 2583 2584 if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY) 2585 seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1, 2586 lockres->l_name, 2587 (unsigned int)ocfs2_get_dentry_lock_ino(lockres)); 2588 else 2589 seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name); 2590 2591 seq_printf(m, "%d\t" 2592 "0x%lx\t" 2593 "0x%x\t" 2594 "0x%x\t" 2595 "%u\t" 2596 "%u\t" 2597 "%d\t" 2598 "%d\t", 2599 lockres->l_level, 2600 lockres->l_flags, 2601 lockres->l_action, 2602 lockres->l_unlock_action, 2603 lockres->l_ro_holders, 2604 lockres->l_ex_holders, 2605 lockres->l_requested, 2606 lockres->l_blocking); 2607 2608 /* Dump the raw LVB */ 2609 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 2610 for(i = 0; i < DLM_LVB_LEN; i++) 2611 seq_printf(m, "0x%x\t", lvb[i]); 2612 2613 #ifdef CONFIG_OCFS2_FS_STATS 2614 # define lock_num_prmode(_l) (_l)->l_lock_num_prmode 2615 # define lock_num_exmode(_l) (_l)->l_lock_num_exmode 2616 # define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed 2617 # define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed 2618 # define lock_total_prmode(_l) (_l)->l_lock_total_prmode 2619 # define lock_total_exmode(_l) (_l)->l_lock_total_exmode 2620 # define lock_max_prmode(_l) (_l)->l_lock_max_prmode 2621 # define lock_max_exmode(_l) (_l)->l_lock_max_exmode 2622 # define lock_refresh(_l) (_l)->l_lock_refresh 2623 #else 2624 # define lock_num_prmode(_l) (0ULL) 2625 # define lock_num_exmode(_l) (0ULL) 2626 # define lock_num_prmode_failed(_l) (0) 2627 # define lock_num_exmode_failed(_l) (0) 2628 # define lock_total_prmode(_l) (0ULL) 2629 # define lock_total_exmode(_l) (0ULL) 2630 # define lock_max_prmode(_l) (0) 2631 # define lock_max_exmode(_l) (0) 2632 # define lock_refresh(_l) (0) 2633 #endif 2634 /* The following seq_print was added in version 2 of this output */ 2635 seq_printf(m, "%llu\t" 2636 "%llu\t" 2637 "%u\t" 2638 "%u\t" 2639 "%llu\t" 2640 "%llu\t" 2641 "%u\t" 2642 "%u\t" 2643 "%u\t", 2644 lock_num_prmode(lockres), 2645 lock_num_exmode(lockres), 2646 lock_num_prmode_failed(lockres), 2647 lock_num_exmode_failed(lockres), 2648 lock_total_prmode(lockres), 2649 lock_total_exmode(lockres), 2650 lock_max_prmode(lockres), 2651 lock_max_exmode(lockres), 2652 lock_refresh(lockres)); 2653 2654 /* End the line */ 2655 seq_printf(m, "\n"); 2656 return 0; 2657 } 2658 2659 static const struct seq_operations 
ocfs2_dlm_seq_ops = { 2660 .start = ocfs2_dlm_seq_start, 2661 .stop = ocfs2_dlm_seq_stop, 2662 .next = ocfs2_dlm_seq_next, 2663 .show = ocfs2_dlm_seq_show, 2664 }; 2665 2666 static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file) 2667 { 2668 struct seq_file *seq = (struct seq_file *) file->private_data; 2669 struct ocfs2_dlm_seq_priv *priv = seq->private; 2670 struct ocfs2_lock_res *res = &priv->p_iter_res; 2671 2672 ocfs2_remove_lockres_tracking(res); 2673 ocfs2_put_dlm_debug(priv->p_dlm_debug); 2674 return seq_release_private(inode, file); 2675 } 2676 2677 static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file) 2678 { 2679 int ret; 2680 struct ocfs2_dlm_seq_priv *priv; 2681 struct seq_file *seq; 2682 struct ocfs2_super *osb; 2683 2684 priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL); 2685 if (!priv) { 2686 ret = -ENOMEM; 2687 mlog_errno(ret); 2688 goto out; 2689 } 2690 osb = inode->i_private; 2691 ocfs2_get_dlm_debug(osb->osb_dlm_debug); 2692 priv->p_dlm_debug = osb->osb_dlm_debug; 2693 INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list); 2694 2695 ret = seq_open(file, &ocfs2_dlm_seq_ops); 2696 if (ret) { 2697 kfree(priv); 2698 mlog_errno(ret); 2699 goto out; 2700 } 2701 2702 seq = (struct seq_file *) file->private_data; 2703 seq->private = priv; 2704 2705 ocfs2_add_lockres_tracking(&priv->p_iter_res, 2706 priv->p_dlm_debug); 2707 2708 out: 2709 return ret; 2710 } 2711 2712 static const struct file_operations ocfs2_dlm_debug_fops = { 2713 .open = ocfs2_dlm_debug_open, 2714 .release = ocfs2_dlm_debug_release, 2715 .read = seq_read, 2716 .llseek = seq_lseek, 2717 }; 2718 2719 static int ocfs2_dlm_init_debug(struct ocfs2_super *osb) 2720 { 2721 int ret = 0; 2722 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug; 2723 2724 dlm_debug->d_locking_state = debugfs_create_file("locking_state", 2725 S_IFREG|S_IRUSR, 2726 osb->osb_debug_root, 2727 osb, 2728 &ocfs2_dlm_debug_fops); 2729 if (!dlm_debug->d_locking_state) { 2730 ret = -EINVAL; 2731 mlog(ML_ERROR, 2732 "Unable to create locking state debugfs file.\n"); 2733 goto out; 2734 } 2735 2736 ocfs2_get_dlm_debug(dlm_debug); 2737 out: 2738 return ret; 2739 } 2740 2741 static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb) 2742 { 2743 struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug; 2744 2745 if (dlm_debug) { 2746 debugfs_remove(dlm_debug->d_locking_state); 2747 ocfs2_put_dlm_debug(dlm_debug); 2748 } 2749 } 2750 2751 int ocfs2_dlm_init(struct ocfs2_super *osb) 2752 { 2753 int status = 0; 2754 struct ocfs2_cluster_connection *conn = NULL; 2755 2756 mlog_entry_void(); 2757 2758 if (ocfs2_mount_local(osb)) { 2759 osb->node_num = 0; 2760 goto local; 2761 } 2762 2763 status = ocfs2_dlm_init_debug(osb); 2764 if (status < 0) { 2765 mlog_errno(status); 2766 goto bail; 2767 } 2768 2769 /* launch downconvert thread */ 2770 osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc"); 2771 if (IS_ERR(osb->dc_task)) { 2772 status = PTR_ERR(osb->dc_task); 2773 osb->dc_task = NULL; 2774 mlog_errno(status); 2775 goto bail; 2776 } 2777 2778 /* for now, uuid == domain */ 2779 status = ocfs2_cluster_connect(osb->osb_cluster_stack, 2780 osb->uuid_str, 2781 strlen(osb->uuid_str), 2782 ocfs2_do_node_down, osb, 2783 &conn); 2784 if (status) { 2785 mlog_errno(status); 2786 goto bail; 2787 } 2788 2789 status = ocfs2_cluster_this_node(&osb->node_num); 2790 if (status < 0) { 2791 mlog_errno(status); 2792 mlog(ML_ERROR, 2793 "could not find this host's node number\n"); 2794 ocfs2_cluster_disconnect(conn, 
0); 2795 goto bail; 2796 } 2797 2798 local: 2799 ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb); 2800 ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb); 2801 2802 osb->cconn = conn; 2803 2804 status = 0; 2805 bail: 2806 if (status < 0) { 2807 ocfs2_dlm_shutdown_debug(osb); 2808 if (osb->dc_task) 2809 kthread_stop(osb->dc_task); 2810 } 2811 2812 mlog_exit(status); 2813 return status; 2814 } 2815 2816 void ocfs2_dlm_shutdown(struct ocfs2_super *osb, 2817 int hangup_pending) 2818 { 2819 mlog_entry_void(); 2820 2821 ocfs2_drop_osb_locks(osb); 2822 2823 /* 2824 * Now that we have dropped all locks and ocfs2_dismount_volume() 2825 * has disabled recovery, the DLM won't be talking to us. It's 2826 * safe to tear things down before disconnecting the cluster. 2827 */ 2828 2829 if (osb->dc_task) { 2830 kthread_stop(osb->dc_task); 2831 osb->dc_task = NULL; 2832 } 2833 2834 ocfs2_lock_res_free(&osb->osb_super_lockres); 2835 ocfs2_lock_res_free(&osb->osb_rename_lockres); 2836 2837 ocfs2_cluster_disconnect(osb->cconn, hangup_pending); 2838 osb->cconn = NULL; 2839 2840 ocfs2_dlm_shutdown_debug(osb); 2841 2842 mlog_exit_void(); 2843 } 2844 2845 static void ocfs2_unlock_ast(void *opaque, int error) 2846 { 2847 struct ocfs2_lock_res *lockres = opaque; 2848 unsigned long flags; 2849 2850 mlog_entry_void(); 2851 2852 mlog(0, "UNLOCK AST called on lock %s, action = %d\n", lockres->l_name, 2853 lockres->l_unlock_action); 2854 2855 spin_lock_irqsave(&lockres->l_lock, flags); 2856 if (error) { 2857 mlog(ML_ERROR, "Dlm passes error %d for lock %s, " 2858 "unlock_action %d\n", error, lockres->l_name, 2859 lockres->l_unlock_action); 2860 spin_unlock_irqrestore(&lockres->l_lock, flags); 2861 return; 2862 } 2863 2864 switch(lockres->l_unlock_action) { 2865 case OCFS2_UNLOCK_CANCEL_CONVERT: 2866 mlog(0, "Cancel convert success for %s\n", lockres->l_name); 2867 lockres->l_action = OCFS2_AST_INVALID; 2868 /* Downconvert thread may have requeued this lock, we 2869 * need to wake it. */ 2870 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) 2871 ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres)); 2872 break; 2873 case OCFS2_UNLOCK_DROP_LOCK: 2874 lockres->l_level = DLM_LOCK_IV; 2875 break; 2876 default: 2877 BUG(); 2878 } 2879 2880 lockres_clear_flags(lockres, OCFS2_LOCK_BUSY); 2881 lockres->l_unlock_action = OCFS2_UNLOCK_INVALID; 2882 wake_up(&lockres->l_event); 2883 spin_unlock_irqrestore(&lockres->l_lock, flags); 2884 2885 mlog_exit_void(); 2886 } 2887 2888 static int ocfs2_drop_lock(struct ocfs2_super *osb, 2889 struct ocfs2_lock_res *lockres) 2890 { 2891 int ret; 2892 unsigned long flags; 2893 u32 lkm_flags = 0; 2894 2895 /* We didn't get anywhere near actually using this lockres. */ 2896 if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) 2897 goto out; 2898 2899 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) 2900 lkm_flags |= DLM_LKF_VALBLK; 2901 2902 spin_lock_irqsave(&lockres->l_lock, flags); 2903 2904 mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING), 2905 "lockres %s, flags 0x%lx\n", 2906 lockres->l_name, lockres->l_flags); 2907 2908 while (lockres->l_flags & OCFS2_LOCK_BUSY) { 2909 mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = " 2910 "%u, unlock_action = %u\n", 2911 lockres->l_name, lockres->l_flags, lockres->l_action, 2912 lockres->l_unlock_action); 2913 2914 spin_unlock_irqrestore(&lockres->l_lock, flags); 2915 2916 /* XXX: Today we just wait on any busy 2917 * locks... Perhaps we need to cancel converts in the 2918 * future? 
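		 *
		 * (Waiting is enough for now: whatever set
		 * OCFS2_LOCK_BUSY will eventually get an AST, which
		 * clears the flag and wakes ocfs2_wait_on_busy_lock()
		 * below, and we then re-check under the spinlock.)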
*/ 2919 ocfs2_wait_on_busy_lock(lockres); 2920 2921 spin_lock_irqsave(&lockres->l_lock, flags); 2922 } 2923 2924 if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) { 2925 if (lockres->l_flags & OCFS2_LOCK_ATTACHED && 2926 lockres->l_level == DLM_LOCK_EX && 2927 !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) 2928 lockres->l_ops->set_lvb(lockres); 2929 } 2930 2931 if (lockres->l_flags & OCFS2_LOCK_BUSY) 2932 mlog(ML_ERROR, "destroying busy lock: \"%s\"\n", 2933 lockres->l_name); 2934 if (lockres->l_flags & OCFS2_LOCK_BLOCKED) 2935 mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name); 2936 2937 if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) { 2938 spin_unlock_irqrestore(&lockres->l_lock, flags); 2939 goto out; 2940 } 2941 2942 lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED); 2943 2944 /* make sure we never get here while waiting for an ast to 2945 * fire. */ 2946 BUG_ON(lockres->l_action != OCFS2_AST_INVALID); 2947 2948 /* is this necessary? */ 2949 lockres_or_flags(lockres, OCFS2_LOCK_BUSY); 2950 lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK; 2951 spin_unlock_irqrestore(&lockres->l_lock, flags); 2952 2953 mlog(0, "lock %s\n", lockres->l_name); 2954 2955 ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags, 2956 lockres); 2957 if (ret) { 2958 ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres); 2959 mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags); 2960 ocfs2_dlm_dump_lksb(&lockres->l_lksb); 2961 BUG(); 2962 } 2963 mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n", 2964 lockres->l_name); 2965 2966 ocfs2_wait_on_busy_lock(lockres); 2967 out: 2968 mlog_exit(0); 2969 return 0; 2970 } 2971 2972 /* Mark the lockres as being dropped. It will no longer be 2973 * queued if blocking, but we still may have to wait on it 2974 * being dequeued from the downconvert thread before we can consider 2975 * it safe to drop. 2976 * 2977 * You can *not* attempt to call cluster_lock on this lockres anymore. */ 2978 void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres) 2979 { 2980 int status; 2981 struct ocfs2_mask_waiter mw; 2982 unsigned long flags; 2983 2984 ocfs2_init_mask_waiter(&mw); 2985 2986 spin_lock_irqsave(&lockres->l_lock, flags); 2987 lockres->l_flags |= OCFS2_LOCK_FREEING; 2988 while (lockres->l_flags & OCFS2_LOCK_QUEUED) { 2989 lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0); 2990 spin_unlock_irqrestore(&lockres->l_lock, flags); 2991 2992 mlog(0, "Waiting on lockres %s\n", lockres->l_name); 2993 2994 status = ocfs2_wait_for_mask(&mw); 2995 if (status) 2996 mlog_errno(status); 2997 2998 spin_lock_irqsave(&lockres->l_lock, flags); 2999 } 3000 spin_unlock_irqrestore(&lockres->l_lock, flags); 3001 } 3002 3003 void ocfs2_simple_drop_lockres(struct ocfs2_super *osb, 3004 struct ocfs2_lock_res *lockres) 3005 { 3006 int ret; 3007 3008 ocfs2_mark_lockres_freeing(lockres); 3009 ret = ocfs2_drop_lock(osb, lockres); 3010 if (ret) 3011 mlog_errno(ret); 3012 } 3013 3014 static void ocfs2_drop_osb_locks(struct ocfs2_super *osb) 3015 { 3016 ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres); 3017 ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres); 3018 } 3019 3020 int ocfs2_drop_inode_locks(struct inode *inode) 3021 { 3022 int status, err; 3023 3024 mlog_entry_void(); 3025 3026 /* No need to call ocfs2_mark_lockres_freeing here - 3027 * ocfs2_clear_inode has done it for us. 
*/ 3028 3029 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), 3030 &OCFS2_I(inode)->ip_open_lockres); 3031 if (err < 0) 3032 mlog_errno(err); 3033 3034 status = err; 3035 3036 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), 3037 &OCFS2_I(inode)->ip_inode_lockres); 3038 if (err < 0) 3039 mlog_errno(err); 3040 if (err < 0 && !status) 3041 status = err; 3042 3043 err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb), 3044 &OCFS2_I(inode)->ip_rw_lockres); 3045 if (err < 0) 3046 mlog_errno(err); 3047 if (err < 0 && !status) 3048 status = err; 3049 3050 mlog_exit(status); 3051 return status; 3052 } 3053 3054 static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres, 3055 int new_level) 3056 { 3057 assert_spin_locked(&lockres->l_lock); 3058 3059 BUG_ON(lockres->l_blocking <= DLM_LOCK_NL); 3060 3061 if (lockres->l_level <= new_level) { 3062 mlog(ML_ERROR, "lockres->l_level (%d) <= new_level (%d)\n", 3063 lockres->l_level, new_level); 3064 BUG(); 3065 } 3066 3067 mlog(0, "lock %s, new_level = %d, l_blocking = %d\n", 3068 lockres->l_name, new_level, lockres->l_blocking); 3069 3070 lockres->l_action = OCFS2_AST_DOWNCONVERT; 3071 lockres->l_requested = new_level; 3072 lockres_or_flags(lockres, OCFS2_LOCK_BUSY); 3073 return lockres_set_pending(lockres); 3074 } 3075 3076 static int ocfs2_downconvert_lock(struct ocfs2_super *osb, 3077 struct ocfs2_lock_res *lockres, 3078 int new_level, 3079 int lvb, 3080 unsigned int generation) 3081 { 3082 int ret; 3083 u32 dlm_flags = DLM_LKF_CONVERT; 3084 3085 mlog_entry_void(); 3086 3087 if (lvb) 3088 dlm_flags |= DLM_LKF_VALBLK; 3089 3090 ret = ocfs2_dlm_lock(osb->cconn, 3091 new_level, 3092 &lockres->l_lksb, 3093 dlm_flags, 3094 lockres->l_name, 3095 OCFS2_LOCK_ID_MAX_LEN - 1, 3096 lockres); 3097 lockres_clear_pending(lockres, generation, osb); 3098 if (ret) { 3099 ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres); 3100 ocfs2_recover_from_dlm_error(lockres, 1); 3101 goto bail; 3102 } 3103 3104 ret = 0; 3105 bail: 3106 mlog_exit(ret); 3107 return ret; 3108 } 3109 3110 /* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */ 3111 static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb, 3112 struct ocfs2_lock_res *lockres) 3113 { 3114 assert_spin_locked(&lockres->l_lock); 3115 3116 mlog_entry_void(); 3117 mlog(0, "lock %s\n", lockres->l_name); 3118 3119 if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) { 3120 /* If we're already trying to cancel a lock conversion 3121 * then just drop the spinlock and allow the caller to 3122 * requeue this lock. */ 3123 3124 mlog(0, "Lockres %s, skip convert\n", lockres->l_name); 3125 return 0; 3126 } 3127 3128 /* were we in a convert when we got the bast fire? */ 3129 BUG_ON(lockres->l_action != OCFS2_AST_CONVERT && 3130 lockres->l_action != OCFS2_AST_DOWNCONVERT); 3131 /* set things up for the unlockast to know to just 3132 * clear out the ast_action and unset busy, etc. 
	 */
3133 	lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
3134
3135 	mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
3136 			"lock %s, invalid flags: 0x%lx\n",
3137 			lockres->l_name, lockres->l_flags);
3138
3139 	return 1;
3140 }
3141
3142 static int ocfs2_cancel_convert(struct ocfs2_super *osb,
3143 				struct ocfs2_lock_res *lockres)
3144 {
3145 	int ret;
3146
3147 	mlog_entry_void();
3148 	mlog(0, "lock %s\n", lockres->l_name);
3149
3150 	ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
3151 			       DLM_LKF_CANCEL, lockres);
3152 	if (ret) {
3153 		ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
3154 		ocfs2_recover_from_dlm_error(lockres, 0);
3155 	}
3156
3157 	mlog(0, "lock %s return from ocfs2_dlm_unlock\n", lockres->l_name);
3158
3159 	mlog_exit(ret);
3160 	return ret;
3161 }
3162
3163 static int ocfs2_unblock_lock(struct ocfs2_super *osb,
3164 			      struct ocfs2_lock_res *lockres,
3165 			      struct ocfs2_unblock_ctl *ctl)
3166 {
3167 	unsigned long flags;
3168 	int blocking;
3169 	int new_level;
3170 	int ret = 0;
3171 	int set_lvb = 0;
3172 	unsigned int gen;
3173
3174 	mlog_entry_void();
3175
3176 	spin_lock_irqsave(&lockres->l_lock, flags);
3177
3178 	BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
3179
3180 recheck:
3181 	if (lockres->l_flags & OCFS2_LOCK_BUSY) {
3182 		/* XXX
3183 		 * This is a *big* race. The OCFS2_LOCK_PENDING flag
3184 		 * exists entirely for one reason - another thread has set
3185 		 * OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
3186 		 *
3187 		 * If we do ocfs2_cancel_convert() before the other thread
3188 		 * calls dlm_lock(), our cancel will do nothing. We will
3189 		 * get no ast, and we will have no way of knowing the
3190 		 * cancel failed. Meanwhile, the other thread will call
3191 		 * into dlm_lock() and wait...forever.
3192 		 *
3193 		 * Why forever? Because another node has asked for the
3194 		 * lock first; that's why we're here in unblock_lock().
3195 		 *
3196 		 * The solution is OCFS2_LOCK_PENDING. When PENDING is
3197 		 * set, we just requeue the unblock. Only when the other
3198 		 * thread has called dlm_lock() and cleared PENDING will
3199 		 * we then cancel their request.
3200 		 *
3201 		 * All callers of dlm_lock() must set OCFS2_LOCK_PENDING
3202 		 * at the same time they set OCFS2_LOCK_BUSY. They must
3203 		 * clear OCFS2_LOCK_PENDING after dlm_lock() returns.
3204 		 */
3205 		if (lockres->l_flags & OCFS2_LOCK_PENDING)
3206 			goto leave_requeue;
3207
3208 		ctl->requeue = 1;
3209 		ret = ocfs2_prepare_cancel_convert(osb, lockres);
3210 		spin_unlock_irqrestore(&lockres->l_lock, flags);
3211 		if (ret) {
3212 			ret = ocfs2_cancel_convert(osb, lockres);
3213 			if (ret < 0)
3214 				mlog_errno(ret);
3215 		}
3216 		goto leave;
3217 	}
3218
3219 	/* if we're blocking an exclusive and we have *any* holders,
3220 	 * then requeue. */
3221 	if ((lockres->l_blocking == DLM_LOCK_EX)
3222 	    && (lockres->l_ex_holders || lockres->l_ro_holders))
3223 		goto leave_requeue;
3224
3225 	/* If it's a PR we're blocking, then only
3226 	 * requeue if we've got any EX holders */
3227 	if (lockres->l_blocking == DLM_LOCK_PR &&
3228 	    lockres->l_ex_holders)
3229 		goto leave_requeue;
3230
3231 	/*
3232 	 * Can we get a lock in this state if the holder counts are
3233 	 * zero? The metadata unblock code used to check this.
3234 	 */
3235 	if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
3236 	    && (lockres->l_flags & OCFS2_LOCK_REFRESHING))
3237 		goto leave_requeue;
3238
3239 	new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
3240
3241 	if (lockres->l_ops->check_downconvert
3242 	    && !lockres->l_ops->check_downconvert(lockres, new_level))
3243 		goto leave_requeue;
3244
3245 	/* If we get here, then we know that there are no more
3246 	 * incompatible holders (and anyone asking for an incompatible
3247 	 * lock is blocked). We can now downconvert the lock */
3248 	if (!lockres->l_ops->downconvert_worker)
3249 		goto downconvert;
3250
3251 	/* Some lockres types want to do a bit of work before
3252 	 * downconverting a lock. Allow that here. The worker function
3253 	 * may sleep, so we save off a copy of what we're blocking as
3254 	 * it may change while we're not holding the spin lock. */
3255 	blocking = lockres->l_blocking;
3256 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3257
3258 	ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
3259
3260 	if (ctl->unblock_action == UNBLOCK_STOP_POST)
3261 		goto leave;
3262
3263 	spin_lock_irqsave(&lockres->l_lock, flags);
3264 	if (blocking != lockres->l_blocking) {
3265 		/* If this changed underneath us, then we can't drop
3266 		 * it just yet. */
3267 		goto recheck;
3268 	}
3269
3270 downconvert:
3271 	ctl->requeue = 0;
3272
3273 	if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
3274 		if (lockres->l_level == DLM_LOCK_EX)
3275 			set_lvb = 1;
3276
3277 		/*
3278 		 * We only set the lvb if the lock has been fully
3279 		 * refreshed - otherwise we risk setting stale
3280 		 * data. If it hasn't been refreshed, there's no need
3281 		 * to clear out the lvb here as its value is still valid.
3282 		 */
3283 		if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
3284 			lockres->l_ops->set_lvb(lockres);
3285 	}
3286
3287 	gen = ocfs2_prepare_downconvert(lockres, new_level);
3288 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3289 	ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
3290 				     gen);
3291
3292 leave:
3293 	mlog_exit(ret);
3294 	return ret;
3295
3296 leave_requeue:
3297 	spin_unlock_irqrestore(&lockres->l_lock, flags);
3298 	ctl->requeue = 1;
3299
3300 	mlog_exit(0);
3301 	return 0;
3302 }
3303
3304 static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
3305 				     int blocking)
3306 {
3307 	struct inode *inode;
3308 	struct address_space *mapping;
3309
3310 	inode = ocfs2_lock_res_inode(lockres);
3311 	mapping = inode->i_mapping;
3312
3313 	if (!S_ISREG(inode->i_mode))
3314 		goto out;
3315
3316 	/*
3317 	 * We need this before the filemap_fdatawrite() so that it can
3318 	 * transfer the dirty bit from the PTE to the
3319 	 * page. Unfortunately this means that even for EX->PR
3320 	 * downconverts, we'll lose our mappings and have to build
3321 	 * them up again.
3322 	 */
3323 	unmap_mapping_range(mapping, 0, 0, 0);
3324
3325 	if (filemap_fdatawrite(mapping)) {
3326 		mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
3327 		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
3328 	}
3329 	sync_mapping_buffers(mapping);
3330 	if (blocking == DLM_LOCK_EX) {
3331 		truncate_inode_pages(mapping, 0);
3332 	} else {
3333 		/* We only need to wait on the I/O if we're not also
3334 		 * truncating pages because truncate_inode_pages waits
3335 		 * for us above. We don't truncate pages if we're
3336 		 * blocking anything < EXMODE because we want to keep
3337 		 * them around in that case.
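		 *
		 * (In short: blocking == DLM_LOCK_EX means unmap,
		 * write back and truncate; anything weaker means
		 * unmap, write back and just wait on the I/O.)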
		 */
3338 		filemap_fdatawait(mapping);
3339 	}
3340
3341 out:
3342 	return UNBLOCK_CONTINUE;
3343 }
3344
3345 static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
3346 					int new_level)
3347 {
3348 	struct inode *inode = ocfs2_lock_res_inode(lockres);
3349 	int checkpointed = ocfs2_inode_fully_checkpointed(inode);
3350
3351 	BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
3352 	BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
3353
3354 	if (checkpointed)
3355 		return 1;
3356
3357 	ocfs2_start_checkpoint(OCFS2_SB(inode->i_sb));
3358 	return 0;
3359 }
3360
3361 static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
3362 {
3363 	struct inode *inode = ocfs2_lock_res_inode(lockres);
3364
3365 	__ocfs2_stuff_meta_lvb(inode);
3366 }
3367
3368 /*
3369  * Does the final reference drop on our dentry lock. Right now this
3370  * happens in the downconvert thread, but we could choose to simplify the
3371  * dlmglue API and push these off to the ocfs2_wq in the future.
3372  */
3373 static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
3374 				     struct ocfs2_lock_res *lockres)
3375 {
3376 	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3377 	ocfs2_dentry_lock_put(osb, dl);
3378 }
3379
3380 /*
3381  * d_delete() matching dentries before the lock downconvert.
3382  *
3383  * At this point, any process waiting to destroy the
3384  * dentry_lock due to last ref count is stopped by the
3385  * OCFS2_LOCK_QUEUED flag.
3386  *
3387  * We have two potential problems:
3388  *
3389  * 1) If we do the last reference drop on our dentry_lock (via dput)
3390  *    we'll wind up in ocfs2_release_dentry_lock(), waiting on
3391  *    the downconvert to finish. Instead we take an elevated
3392  *    reference and push the drop until after we've completed our
3393  *    unblock processing.
3394  *
3395  * 2) There might be another process with a final reference,
3396  *    waiting on us to finish processing. If this is the case, we
3397  *    detect it and exit out - there are no more dentries anyway.
3398  */
3399 static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
3400 				       int blocking)
3401 {
3402 	struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
3403 	struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
3404 	struct dentry *dentry;
3405 	unsigned long flags;
3406 	int extra_ref = 0;
3407
3408 	/*
3409 	 * This node is blocking another node from getting a read
3410 	 * lock. This happens when we've renamed within a
3411 	 * directory. We've forced the other nodes to d_delete(), but
3412 	 * we never actually dropped our lock because it's still
3413 	 * valid. The downconvert code will retain a PR for this node,
3414 	 * so there's no further work to do.
3415 	 */
3416 	if (blocking == DLM_LOCK_PR)
3417 		return UNBLOCK_CONTINUE;
3418
3419 	/*
3420 	 * Mark this inode as potentially orphaned. The code in
3421 	 * ocfs2_delete_inode() will figure out whether it actually
3422 	 * needs to be freed or not.
3423 	 */
3424 	spin_lock(&oi->ip_lock);
3425 	oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
3426 	spin_unlock(&oi->ip_lock);
3427
3428 	/*
3429 	 * Yuck. We need to make sure however that the check of
3430 	 * OCFS2_LOCK_FREEING and the extra reference are atomic with
3431 	 * respect to a reference decrement or the setting of that
3432 	 * flag.
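	 *
	 * (Hence the pair of locks taken below: l_lock for the
	 * OCFS2_LOCK_FREEING check and dentry_attach_lock for the
	 * dl_count bump.)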
3433 */ 3434 spin_lock_irqsave(&lockres->l_lock, flags); 3435 spin_lock(&dentry_attach_lock); 3436 if (!(lockres->l_flags & OCFS2_LOCK_FREEING) 3437 && dl->dl_count) { 3438 dl->dl_count++; 3439 extra_ref = 1; 3440 } 3441 spin_unlock(&dentry_attach_lock); 3442 spin_unlock_irqrestore(&lockres->l_lock, flags); 3443 3444 mlog(0, "extra_ref = %d\n", extra_ref); 3445 3446 /* 3447 * We have a process waiting on us in ocfs2_dentry_iput(), 3448 * which means we can't have any more outstanding 3449 * aliases. There's no need to do any more work. 3450 */ 3451 if (!extra_ref) 3452 return UNBLOCK_CONTINUE; 3453 3454 spin_lock(&dentry_attach_lock); 3455 while (1) { 3456 dentry = ocfs2_find_local_alias(dl->dl_inode, 3457 dl->dl_parent_blkno, 1); 3458 if (!dentry) 3459 break; 3460 spin_unlock(&dentry_attach_lock); 3461 3462 mlog(0, "d_delete(%.*s);\n", dentry->d_name.len, 3463 dentry->d_name.name); 3464 3465 /* 3466 * The following dcache calls may do an 3467 * iput(). Normally we don't want that from the 3468 * downconverting thread, but in this case it's ok 3469 * because the requesting node already has an 3470 * exclusive lock on the inode, so it can't be queued 3471 * for a downconvert. 3472 */ 3473 d_delete(dentry); 3474 dput(dentry); 3475 3476 spin_lock(&dentry_attach_lock); 3477 } 3478 spin_unlock(&dentry_attach_lock); 3479 3480 /* 3481 * If we are the last holder of this dentry lock, there is no 3482 * reason to downconvert so skip straight to the unlock. 3483 */ 3484 if (dl->dl_count == 1) 3485 return UNBLOCK_STOP_POST; 3486 3487 return UNBLOCK_CONTINUE_POST; 3488 } 3489 3490 static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres) 3491 { 3492 struct ocfs2_qinfo_lvb *lvb; 3493 struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres); 3494 struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb, 3495 oinfo->dqi_gi.dqi_type); 3496 3497 mlog_entry_void(); 3498 3499 lvb = ocfs2_dlm_lvb(&lockres->l_lksb); 3500 lvb->lvb_version = OCFS2_QINFO_LVB_VERSION; 3501 lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace); 3502 lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace); 3503 lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms); 3504 lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks); 3505 lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk); 3506 lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry); 3507 3508 mlog_exit_void(); 3509 } 3510 3511 void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex) 3512 { 3513 struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock; 3514 struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb); 3515 int level = ex ? 
DLM_LOCK_EX : DLM_LOCK_PR;
3516
3517 	mlog_entry_void();
3518 	if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
3519 		ocfs2_cluster_unlock(osb, lockres, level);
3520 	mlog_exit_void();
3521 }
3522
3523 static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
3524 {
3525 	struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
3526 					    oinfo->dqi_gi.dqi_type);
3527 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3528 	struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
3529 	struct buffer_head *bh = NULL;
3530 	struct ocfs2_global_disk_dqinfo *gdinfo;
3531 	int status = 0;
3532
3533 	if (lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
3534 		info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
3535 		info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
3536 		oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
3537 		oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
3538 		oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
3539 		oinfo->dqi_gi.dqi_free_entry =
3540 					be32_to_cpu(lvb->lvb_free_entry);
3541 	} else {
3542 		status = ocfs2_read_quota_block(oinfo->dqi_gqinode, 0, &bh);
3543 		if (status) {
3544 			mlog_errno(status);
3545 			goto bail;
3546 		}
3547 		gdinfo = (struct ocfs2_global_disk_dqinfo *)
3548 					(bh->b_data + OCFS2_GLOBAL_INFO_OFF);
3549 		info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
3550 		info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
3551 		oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
3552 		oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
3553 		oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
3554 		oinfo->dqi_gi.dqi_free_entry =
3555 					le32_to_cpu(gdinfo->dqi_free_entry);
3556 		brelse(bh);
3557 		ocfs2_track_lock_refresh(lockres);
3558 	}
3559
3560 bail:
3561 	return status;
3562 }
3563
3564 /* Lock quota info; this function expects at least a shared lock on the
3565  * quota file so that we can safely refresh quota info from disk. */
3566 int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
3567 {
3568 	struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
3569 	struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
3570 	int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
3571 	int status = 0;
3572
3573 	mlog_entry_void();
3574
3575 	/* On RO devices, locking really isn't needed... */
3576 	if (ocfs2_is_hard_readonly(osb)) {
3577 		if (ex)
3578 			status = -EROFS;
3579 		goto bail;
3580 	}
3581 	if (ocfs2_mount_local(osb))
3582 		goto bail;
3583
3584 	status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
3585 	if (status < 0) {
3586 		mlog_errno(status);
3587 		goto bail;
3588 	}
3589 	if (!ocfs2_should_refresh_lock_res(lockres))
3590 		goto bail;
3591 	/* OK, we have the lock but we need to refresh the quota info */
3592 	status = ocfs2_refresh_qinfo(oinfo);
3593 	if (status)
3594 		ocfs2_qinfo_unlock(oinfo, ex);
3595 	ocfs2_complete_lock_res_refresh(lockres, status);
3596 bail:
3597 	mlog_exit(status);
3598 	return status;
3599 }
3600
3601 /*
3602  * This is the filesystem locking protocol. It provides the lock handling
3603  * hooks for the underlying DLM. It has a maximum version number.
3604  * The version number allows interoperability with systems running at
3605  * the same major number and an equal or smaller minor number.
3606  *
3607  * Whenever the filesystem does new things with locks (adds or removes a
3608  * lock, orders them differently, does different things underneath a lock),
3609  * the version must be changed. The protocol is negotiated when joining
3610  * the dlm domain.
A node may join the domain if its major version is 3611 * identical to all other nodes and its minor version is greater than 3612 * or equal to all other nodes. When its minor version is greater than 3613 * the other nodes, it will run at the minor version specified by the 3614 * other nodes. 3615 * 3616 * If a locking change is made that will not be compatible with older 3617 * versions, the major number must be increased and the minor version set 3618 * to zero. If a change merely adds a behavior that can be disabled when 3619 * speaking to older versions, the minor version must be increased. If a 3620 * change adds a fully backwards compatible change (eg, LVB changes that 3621 * are just ignored by older versions), the version does not need to be 3622 * updated. 3623 */ 3624 static struct ocfs2_locking_protocol lproto = { 3625 .lp_max_version = { 3626 .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR, 3627 .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR, 3628 }, 3629 .lp_lock_ast = ocfs2_locking_ast, 3630 .lp_blocking_ast = ocfs2_blocking_ast, 3631 .lp_unlock_ast = ocfs2_unlock_ast, 3632 }; 3633 3634 void ocfs2_set_locking_protocol(void) 3635 { 3636 ocfs2_stack_glue_set_locking_protocol(&lproto); 3637 } 3638 3639 3640 static void ocfs2_process_blocked_lock(struct ocfs2_super *osb, 3641 struct ocfs2_lock_res *lockres) 3642 { 3643 int status; 3644 struct ocfs2_unblock_ctl ctl = {0, 0,}; 3645 unsigned long flags; 3646 3647 /* Our reference to the lockres in this function can be 3648 * considered valid until we remove the OCFS2_LOCK_QUEUED 3649 * flag. */ 3650 3651 mlog_entry_void(); 3652 3653 BUG_ON(!lockres); 3654 BUG_ON(!lockres->l_ops); 3655 3656 mlog(0, "lockres %s blocked.\n", lockres->l_name); 3657 3658 /* Detect whether a lock has been marked as going away while 3659 * the downconvert thread was processing other things. A lock can 3660 * still be marked with OCFS2_LOCK_FREEING after this check, 3661 * but short circuiting here will still save us some 3662 * performance. */ 3663 spin_lock_irqsave(&lockres->l_lock, flags); 3664 if (lockres->l_flags & OCFS2_LOCK_FREEING) 3665 goto unqueue; 3666 spin_unlock_irqrestore(&lockres->l_lock, flags); 3667 3668 status = ocfs2_unblock_lock(osb, lockres, &ctl); 3669 if (status < 0) 3670 mlog_errno(status); 3671 3672 spin_lock_irqsave(&lockres->l_lock, flags); 3673 unqueue: 3674 if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) { 3675 lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED); 3676 } else 3677 ocfs2_schedule_blocked_lock(osb, lockres); 3678 3679 mlog(0, "lockres %s, requeue = %s.\n", lockres->l_name, 3680 ctl.requeue ? "yes" : "no"); 3681 spin_unlock_irqrestore(&lockres->l_lock, flags); 3682 3683 if (ctl.unblock_action != UNBLOCK_CONTINUE 3684 && lockres->l_ops->post_unlock) 3685 lockres->l_ops->post_unlock(osb, lockres); 3686 3687 mlog_exit_void(); 3688 } 3689 3690 static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb, 3691 struct ocfs2_lock_res *lockres) 3692 { 3693 mlog_entry_void(); 3694 3695 assert_spin_locked(&lockres->l_lock); 3696 3697 if (lockres->l_flags & OCFS2_LOCK_FREEING) { 3698 /* Do not schedule a lock for downconvert when it's on 3699 * the way to destruction - any nodes wanting access 3700 * to the resource will get it soon. 
		 */
3701 		mlog(0, "Lockres %s won't be scheduled: flags 0x%lx\n",
3702 		     lockres->l_name, lockres->l_flags);
3703 		return;
3704 	}
3705
3706 	lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
3707
3708 	spin_lock(&osb->dc_task_lock);
3709 	if (list_empty(&lockres->l_blocked_list)) {
3710 		list_add_tail(&lockres->l_blocked_list,
3711 			      &osb->blocked_lock_list);
3712 		osb->blocked_lock_count++;
3713 	}
3714 	spin_unlock(&osb->dc_task_lock);
3715
3716 	mlog_exit_void();
3717 }
3718
3719 static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
3720 {
3721 	unsigned long processed;
3722 	struct ocfs2_lock_res *lockres;
3723
3724 	mlog_entry_void();
3725
3726 	spin_lock(&osb->dc_task_lock);
3727 	/* grab this early so we know to try again if a state change and
3728 	 * wake happens part-way through our work */
3729 	osb->dc_work_sequence = osb->dc_wake_sequence;
3730
3731 	processed = osb->blocked_lock_count;
3732 	while (processed) {
3733 		BUG_ON(list_empty(&osb->blocked_lock_list));
3734
3735 		lockres = list_entry(osb->blocked_lock_list.next,
3736 				     struct ocfs2_lock_res, l_blocked_list);
3737 		list_del_init(&lockres->l_blocked_list);
3738 		osb->blocked_lock_count--;
3739 		spin_unlock(&osb->dc_task_lock);
3740
3741 		BUG_ON(!processed);
3742 		processed--;
3743
3744 		ocfs2_process_blocked_lock(osb, lockres);
3745
3746 		spin_lock(&osb->dc_task_lock);
3747 	}
3748 	spin_unlock(&osb->dc_task_lock);
3749
3750 	mlog_exit_void();
3751 }
3752
3753 static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
3754 {
3755 	int empty = 0;
3756
3757 	spin_lock(&osb->dc_task_lock);
3758 	if (list_empty(&osb->blocked_lock_list))
3759 		empty = 1;
3760
3761 	spin_unlock(&osb->dc_task_lock);
3762 	return empty;
3763 }
3764
3765 static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
3766 {
3767 	int should_wake = 0;
3768
3769 	spin_lock(&osb->dc_task_lock);
3770 	if (osb->dc_work_sequence != osb->dc_wake_sequence)
3771 		should_wake = 1;
3772 	spin_unlock(&osb->dc_task_lock);
3773
3774 	return should_wake;
3775 }
3776
3777 static int ocfs2_downconvert_thread(void *arg)
3778 {
3779 	int status = 0;
3780 	struct ocfs2_super *osb = arg;
3781
3782 	/* only quit once we've been asked to stop and there is no more
3783 	 * work available */
3784 	while (!(kthread_should_stop() &&
3785 		ocfs2_downconvert_thread_lists_empty(osb))) {
3786
3787 		wait_event_interruptible(osb->dc_event,
3788 					 ocfs2_downconvert_thread_should_wake(osb) ||
3789 					 kthread_should_stop());
3790
3791 		mlog(0, "downconvert_thread: awoken\n");
3792
3793 		ocfs2_downconvert_thread_do_work(osb);
3794 	}
3795
3796 	osb->dc_task = NULL;
3797 	return status;
3798 }
3799
3800 void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
3801 {
3802 	spin_lock(&osb->dc_task_lock);
3803 	/* make sure the downconvert thread gets a swipe at whatever changes
3804 	 * the caller may have made to the lock state */
3805 	osb->dc_wake_sequence++;
3806 	spin_unlock(&osb->dc_task_lock);
3807 	wake_up(&osb->dc_event);
3808 }
3809