/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dcache.c
 *
 * dentry cache handling code
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/namei.h>

#define MLOG_MASK_PREFIX ML_DCACHE
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "super.h"
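
/*
 * Store the parent directory's lock generation in a negative dentry.
 *
 * Negative dentries have no inode and thus no dentry lock, so d_fsdata
 * is free to hold the generation instead. ocfs2_dentry_revalidate()
 * compares this saved value against the parent's current
 * ip_dir_lock_gen to decide whether the cached negative result can
 * still be trusted.
 */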
void ocfs2_dentry_attach_gen(struct dentry *dentry)
{
	unsigned long gen =
		OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
	BUG_ON(dentry->d_inode);
	dentry->d_fsdata = (void *)gen;
}

static int ocfs2_dentry_revalidate(struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	int ret = 0;    /* if all else fails, just return false */
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	mlog_entry("(0x%p, '%.*s')\n", dentry,
		   dentry->d_name.len, dentry->d_name.name);

	/* For a negative dentry -
	 * check the generation number of the parent and compare with the
	 * one stored in the dentry's d_fsdata.
	 */
	if (inode == NULL) {
		unsigned long gen = (unsigned long) dentry->d_fsdata;
		unsigned long pgen =
			OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
		mlog(0, "negative dentry: %.*s parent gen: %lu "
		     "dentry gen: %lu\n",
		     dentry->d_name.len, dentry->d_name.name, pgen, gen);
		if (gen != pgen)
			goto bail;
		goto valid;
	}

	BUG_ON(!osb);

	if (inode == osb->root_inode || is_bad_inode(inode))
		goto bail;

	spin_lock(&OCFS2_I(inode)->ip_lock);
	/* did we or someone else delete this inode? */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		mlog(0, "inode (%llu) deleted, returning false\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/*
	 * We don't need a cluster lock to test this because once an
	 * inode nlink hits zero, it never goes back.
	 */
	if (inode->i_nlink == 0) {
		mlog(0, "Inode %llu orphaned, returning false "
		     "dir = %d\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno,
		     S_ISDIR(inode->i_mode));
		goto bail;
	}

	/*
	 * If the last lookup failed to create a dentry lock, let us
	 * redo it.
	 */
	if (!dentry->d_fsdata) {
		mlog(0, "Inode %llu doesn't have dentry lock, "
		     "returning false\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto bail;
	}

valid:
	ret = 1;

bail:
	mlog_exit(ret);

	return ret;
}

static int ocfs2_match_dentry(struct dentry *dentry,
			      u64 parent_blkno,
			      int skip_unhashed)
{
	struct inode *parent;

	/*
	 * ocfs2_lookup() does a d_splice_alias() _before_ attaching
	 * to the lock data, so we skip those here, otherwise
	 * ocfs2_dentry_attach_lock() will get its original dentry
	 * back.
	 */
	if (!dentry->d_fsdata)
		return 0;

	if (!dentry->d_parent)
		return 0;

	if (skip_unhashed && d_unhashed(dentry))
		return 0;

	parent = dentry->d_parent->d_inode;
	/* Negative parent dentry? */
	if (!parent)
		return 0;

	/* Name is in a different directory. */
	if (OCFS2_I(parent)->ip_blkno != parent_blkno)
		return 0;

	return 1;
}

/*
 * Walk the inode alias list, and find a dentry which has a given
 * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
 * is looking for a dentry_lock reference. The downconvert thread is
 * looking to unhash aliases, so we allow it to skip any that already
 * have that property.
 */
struct dentry *ocfs2_find_local_alias(struct inode *inode,
				      u64 parent_blkno,
				      int skip_unhashed)
{
	struct list_head *p;
	struct dentry *dentry = NULL;

	spin_lock(&dcache_lock);
	spin_lock(&dcache_inode_lock);
	list_for_each(p, &inode->i_dentry) {
		dentry = list_entry(p, struct dentry, d_alias);

		spin_lock(&dentry->d_lock);
		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
			mlog(0, "dentry found: %.*s\n",
			     dentry->d_name.len, dentry->d_name.name);

			dget_locked_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			break;
		}
		spin_unlock(&dentry->d_lock);

		dentry = NULL;
	}

	spin_unlock(&dcache_inode_lock);
	spin_unlock(&dcache_lock);

	return dentry;
}
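
/*
 * dentry_attach_lock protects dentry->d_fsdata and the dl_count
 * reference count on struct ocfs2_dentry_lock. Attaching and detaching
 * must be serialized against each other because a dput() (and with it
 * ocfs2_dentry_iput()) can happen asynchronously when the dcache is
 * pruned.
 */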
DEFINE_SPINLOCK(dentry_attach_lock);

/*
 * Attach this dentry to a cluster lock.
 *
 * Dentry locks cover all links in a given directory to a particular
 * inode. We do this so that ocfs2 can build a lock name which all
 * nodes in the cluster can agree on at all times. Shoving full names
 * in the cluster lock won't work due to size restrictions. Covering
 * links inside of a directory is a good compromise because it still
 * allows us to use the parent directory lock to synchronize
 * operations.
 *
 * Call this function with the parent dir semaphore and the parent dir
 * cluster lock held.
 *
 * The dir semaphore will protect us from having to worry about
 * concurrent processes on our node trying to attach a lock at the
 * same time.
 *
 * The dir cluster lock (held at either PR or EX mode) protects us
 * from unlink and rename on other nodes.
 *
 * A dput() can happen asynchronously due to pruning, so we cover
 * attaching and detaching the dentry lock with a
 * dentry_attach_lock.
 *
 * A node which has done lookup on a name retains a protected read
 * lock until final dput. If the user requests an unlink or rename,
 * the protected read is upgraded to an exclusive lock. Other nodes
 * who have seen the dentry will then be informed that they need to
 * downgrade their lock, which will involve d_delete on the
 * dentry. This happens in ocfs2_dentry_convert_worker().
 */
int ocfs2_dentry_attach_lock(struct dentry *dentry,
			     struct inode *inode,
			     u64 parent_blkno)
{
	int ret;
	struct dentry *alias;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n",
	     dentry->d_name.len, dentry->d_name.name,
	     (unsigned long long)parent_blkno, dl);

	/*
	 * Negative dentry. We ignore these for now.
	 *
	 * XXX: Could we improve ocfs2_dentry_revalidate() by
	 * tracking these?
	 */
	if (!inode)
		return 0;

	if (!dentry->d_inode && dentry->d_fsdata) {
		/* Converting a negative dentry to positive.
		   Clear dentry->d_fsdata. */
		dentry->d_fsdata = dl = NULL;
	}

	if (dl) {
		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%.*s\": old parent: %llu, new: %llu\n",
				dentry->d_name.len, dentry->d_name.name,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);
		return 0;
	}

	alias = ocfs2_find_local_alias(inode, parent_blkno, 0);
	if (alias) {
		/*
		 * Great, an alias exists, which means we must have a
		 * dentry lock already. We can just grab the lock off
		 * the alias and add it to the list.
		 *
		 * We're depending here on the fact that this dentry
		 * was found and exists in the dcache and so must have
		 * a reference to the dentry_lock because we can't
		 * race creates. Final dput() cannot happen on it
		 * since we have it pinned, so our reference is safe.
		 */
		dl = alias->d_fsdata;
		mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n",
				(unsigned long long)parent_blkno,
				(unsigned long long)OCFS2_I(inode)->ip_blkno);

		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%.*s\": old parent: %llu, new: %llu\n",
				dentry->d_name.len, dentry->d_name.name,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);

		mlog(0, "Found: %s\n", dl->dl_lockres.l_name);

		goto out_attach;
	}

	/*
	 * There are no other aliases
	 */
	dl = kmalloc(sizeof(*dl), GFP_NOFS);
	if (!dl) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}

	dl->dl_count = 0;
	/*
	 * Does this have to happen below, for all attaches, in case
	 * the struct inode gets blown away by the downconvert thread?
	 */
	dl->dl_inode = igrab(inode);
	dl->dl_parent_blkno = parent_blkno;
	ocfs2_dentry_lock_res_init(dl, parent_blkno, inode);

out_attach:
	spin_lock(&dentry_attach_lock);
	dentry->d_fsdata = dl;
	dl->dl_count++;
	spin_unlock(&dentry_attach_lock);

	/*
	 * This actually gets us our PRMODE level lock. From now on,
	 * we'll have a notification if one of these names is
	 * destroyed on another node.
	 */
	ret = ocfs2_dentry_lock(dentry, 0);
	if (!ret)
		ocfs2_dentry_unlock(dentry, 0);
	else
		mlog_errno(ret);

	/*
	 * In case of error, manually free the allocation and do the iput().
	 * We need to do this because error here means no d_instantiate(),
	 * which means iput() will not be called during dput(dentry).
	 */
	if (ret < 0 && !alias) {
		ocfs2_lock_res_free(&dl->dl_lockres);
		BUG_ON(dl->dl_count != 1);
		spin_lock(&dentry_attach_lock);
		dentry->d_fsdata = NULL;
		spin_unlock(&dentry_attach_lock);
		kfree(dl);
		iput(inode);
	}

	dput(alias);

	return ret;
}
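
/*
 * dentry_list_lock protects osb->dentry_lock_list, the singly linked
 * list (chained through dl_next) of dentry locks whose inode reference
 * still has to be dropped. The actual iput() calls are deferred to
 * ocfs2_wq via ocfs2_drop_dl_inodes() below.
 */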
DEFINE_SPINLOCK(dentry_list_lock);

/* We limit the number of dentry locks to drop in one go. We have
 * this limit so that we don't starve other users of ocfs2_wq. */
#define DL_INODE_DROP_COUNT 64

/* Drop inode references from dentry locks */
static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count)
{
	struct ocfs2_dentry_lock *dl;

	spin_lock(&dentry_list_lock);
	while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) {
		dl = osb->dentry_lock_list;
		osb->dentry_lock_list = dl->dl_next;
		spin_unlock(&dentry_list_lock);
		iput(dl->dl_inode);
		kfree(dl);
		spin_lock(&dentry_list_lock);
	}
	spin_unlock(&dentry_list_lock);
}

void ocfs2_drop_dl_inodes(struct work_struct *work)
{
	struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
					       dentry_lock_work);

	__ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT);
	/*
	 * Don't queue dropping if umount is in progress. We flush the
	 * list in ocfs2_dismount_volume().
	 */
	spin_lock(&dentry_list_lock);
	if (osb->dentry_lock_list &&
	    !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
		queue_work(ocfs2_wq, &osb->dentry_lock_work);
	spin_unlock(&dentry_list_lock);
}

/* Flush the whole list of pending inode drops; used at unmount time */
void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb)
{
	__ocfs2_drop_dl_inodes(osb, -1);
}

/*
 * ocfs2_dentry_iput() and friends.
 *
 * At this point, our particular dentry is detached from the inode's
 * alias list, so there's no way that the locking code can find it.
 *
 * The interesting stuff happens when we determine that our lock needs
 * to go away because this is the last subdir alias in the
 * system. This function needs to handle a couple things:
 *
 * 1) Synchronizing lock shutdown with the downconvert threads. This
 *    is already handled for us via the lockres release drop function
 *    called in ocfs2_release_dentry_lock()
 *
 * 2) A race may occur when we're doing our lock shutdown and
 *    another process wants to create a new dentry lock. Right now we
 *    let them race, which means that for a very short while, this
 *    node might have two locks on a lock resource. This shouldn't be
 *    a problem though because one of them is in the process of being
 *    thrown out.
 */
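
/*
 * Called once dl_count drops to zero (see ocfs2_dentry_lock_put()
 * below): drop and free the cluster lock here, but leave the inode
 * reference on osb->dentry_lock_list so that ocfs2_wq performs the
 * final iput(), which could otherwise lead into tricky inode deletion
 * paths from this context.
 */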
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
				   struct ocfs2_dentry_lock *dl)
{
	ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
	ocfs2_lock_res_free(&dl->dl_lockres);

	/* We leave dropping of inode reference to ocfs2_wq as that can
	 * possibly lead to inode deletion which gets tricky */
	spin_lock(&dentry_list_lock);
	if (!osb->dentry_lock_list &&
	    !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
		queue_work(ocfs2_wq, &osb->dentry_lock_work);
	dl->dl_next = osb->dentry_lock_list;
	osb->dentry_lock_list = dl;
	spin_unlock(&dentry_list_lock);
}

void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
			   struct ocfs2_dentry_lock *dl)
{
	int unlock;

	BUG_ON(dl->dl_count == 0);

	spin_lock(&dentry_attach_lock);
	dl->dl_count--;
	unlock = !dl->dl_count;
	spin_unlock(&dentry_attach_lock);

	if (unlock)
		ocfs2_drop_dentry_lock(osb, dl);
}

static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
{
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	if (!dl) {
		/*
		 * No dentry lock is ok if we're disconnected or
		 * unhashed.
		 */
		if (!(dentry->d_flags & DCACHE_DISCONNECTED) &&
		    !d_unhashed(dentry)) {
			unsigned long long ino = 0ULL;
			if (inode)
				ino = (unsigned long long)OCFS2_I(inode)->ip_blkno;
			mlog(ML_ERROR, "Dentry is missing cluster lock. "
			     "inode: %llu, d_flags: 0x%x, d_name: %.*s\n",
			     ino, dentry->d_flags, dentry->d_name.len,
			     dentry->d_name.name);
		}

		goto out;
	}

	mlog_bug_on_msg(dl->dl_count == 0, "dentry: %.*s, count: %u\n",
			dentry->d_name.len, dentry->d_name.name,
			dl->dl_count);

	ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl);

out:
	iput(inode);
}

/*
 * d_move(), but keep the locks in sync.
 *
 * When we are done, "dentry" will have the parent dir and name of
 * "target", which will be thrown away.
 *
 * We manually update the lock of "dentry" if need be.
 *
 * "target" doesn't have its dentry lock touched - we allow the later
 * dput() to handle this for us.
 *
 * This is called during ocfs2_rename(), while holding parent
 * directory locks. The dentries have already been deleted on other
 * nodes via ocfs2_remote_dentry_delete().
 *
 * Normally, the VFS handles the d_move() for the file system, after
 * the ->rename() callback. OCFS2 wants to handle this internally, so
 * the new lock can be created atomically with respect to the cluster.
 */
void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
		       struct inode *old_dir, struct inode *new_dir)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
	struct inode *inode = dentry->d_inode;

	/*
	 * Move within the same directory, so the actual lock info won't
	 * change.
	 *
	 * XXX: Is there any advantage to dropping the lock here?
	 */
	if (old_dir == new_dir)
		goto out_move;

	ocfs2_dentry_lock_put(osb, dentry->d_fsdata);

	dentry->d_fsdata = NULL;
	ret = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(new_dir)->ip_blkno);
	if (ret)
		mlog_errno(ret);

out_move:
	d_move(dentry, target);
}

const struct dentry_operations ocfs2_dentry_ops = {
	.d_revalidate		= ocfs2_dentry_revalidate,
	.d_iput			= ocfs2_dentry_iput,
};