/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dcache.c
 *
 * dentry cache handling code
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/namei.h>

#define MLOG_MASK_PREFIX ML_DCACHE
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "super.h"

void ocfs2_dentry_attach_gen(struct dentry *dentry)
{
	unsigned long gen =
		OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
	BUG_ON(dentry->d_inode);
	dentry->d_fsdata = (void *)gen;
}

static int ocfs2_dentry_revalidate(struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode;
	int ret = 0;	/* if all else fails, just return false */
	struct ocfs2_super *osb;

	if (nd->flags & LOOKUP_RCU)
		return -ECHILD;

	inode = dentry->d_inode;
	osb = OCFS2_SB(dentry->d_sb);

	mlog_entry("(0x%p, '%.*s')\n", dentry,
		   dentry->d_name.len, dentry->d_name.name);

	/* For a negative dentry -
	 * check the generation number of the parent and compare with the
	 * one stored in the inode.
	 */
	if (inode == NULL) {
		unsigned long gen = (unsigned long) dentry->d_fsdata;
		unsigned long pgen =
			OCFS2_I(dentry->d_parent->d_inode)->ip_dir_lock_gen;
		mlog(0, "negative dentry: %.*s parent gen: %lu "
		     "dentry gen: %lu\n",
		     dentry->d_name.len, dentry->d_name.name, pgen, gen);
		if (gen != pgen)
			goto bail;
		goto valid;
	}

	BUG_ON(!osb);

	if (inode == osb->root_inode || is_bad_inode(inode))
		goto bail;

	spin_lock(&OCFS2_I(inode)->ip_lock);
	/* did we or someone else delete this inode? */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		mlog(0, "inode (%llu) deleted, returning false\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/*
	 * We don't need a cluster lock to test this because once an
	 * inode nlink hits zero, it never goes back.
	 */
	if (inode->i_nlink == 0) {
		mlog(0, "Inode %llu orphaned, returning false "
		     "dir = %d\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno,
		     S_ISDIR(inode->i_mode));
		goto bail;
	}

	/*
	 * If the last lookup failed to create a dentry lock, let us
	 * redo it.
	 */
	if (!dentry->d_fsdata) {
		mlog(0, "Inode %llu doesn't have dentry lock, "
		     "returning false\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto bail;
	}

valid:
	ret = 1;

bail:
	mlog_exit(ret);

	return ret;
}
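/*
 * Descriptive summary of the revalidation checks above (it implies no
 * behavior beyond the code itself):
 *
 *	- negative dentry: valid only while the parent's
 *	  ip_dir_lock_gen still matches the generation stashed in
 *	  d_fsdata by ocfs2_dentry_attach_gen()
 *	- root inode or bad inode: invalid
 *	- OCFS2_INODE_DELETED set, or i_nlink == 0: invalid
 *	- missing d_fsdata (no dentry lock): invalid, which forces the
 *	  next lookup to re-attach the cluster lock
 */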
static int ocfs2_match_dentry(struct dentry *dentry,
			      u64 parent_blkno,
			      int skip_unhashed)
{
	struct inode *parent;

	/*
	 * ocfs2_lookup() does a d_splice_alias() _before_ attaching
	 * to the lock data, so we skip those here, otherwise
	 * ocfs2_dentry_attach_lock() will get its original dentry
	 * back.
	 */
	if (!dentry->d_fsdata)
		return 0;

	if (!dentry->d_parent)
		return 0;

	if (skip_unhashed && d_unhashed(dentry))
		return 0;

	parent = dentry->d_parent->d_inode;
	/* Negative parent dentry? */
	if (!parent)
		return 0;

	/* Name is in a different directory. */
	if (OCFS2_I(parent)->ip_blkno != parent_blkno)
		return 0;

	return 1;
}

/*
 * Walk the inode alias list, and find a dentry which has a given
 * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
 * is looking for a dentry_lock reference. The downconvert thread is
 * looking to unhash aliases, so we allow it to skip any that already
 * have that property.
 */
struct dentry *ocfs2_find_local_alias(struct inode *inode,
				      u64 parent_blkno,
				      int skip_unhashed)
{
	struct list_head *p;
	struct dentry *dentry = NULL;

	spin_lock(&inode->i_lock);
	list_for_each(p, &inode->i_dentry) {
		dentry = list_entry(p, struct dentry, d_alias);

		spin_lock(&dentry->d_lock);
		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
			mlog(0, "dentry found: %.*s\n",
			     dentry->d_name.len, dentry->d_name.name);

			dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			break;
		}
		spin_unlock(&dentry->d_lock);

		dentry = NULL;
	}

	spin_unlock(&inode->i_lock);

	return dentry;
}
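/*
 * Illustrative sketch (not a call site in this file): the downconvert
 * path uses skip_unhashed=1 to peel off and unhash every alias of an
 * inode under a given parent, roughly like so:
 *
 *	while ((dentry = ocfs2_find_local_alias(inode,
 *						parent_blkno, 1)) != NULL) {
 *		d_delete(dentry);
 *		dput(dentry);
 *	}
 *
 * The real loop lives in ocfs2_dentry_convert_worker(); treat the
 * above as a sketch of the contract, not the implementation.
 */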
DEFINE_SPINLOCK(dentry_attach_lock);

/*
 * Attach this dentry to a cluster lock.
 *
 * Dentry locks cover all links in a given directory to a particular
 * inode. We do this so that ocfs2 can build a lock name which all
 * nodes in the cluster can agree on at all times. Shoving full names
 * in the cluster lock won't work due to size restrictions. Covering
 * links inside of a directory is a good compromise because it still
 * allows us to use the parent directory lock to synchronize
 * operations.
 *
 * Call this function with the parent dir semaphore and the parent dir
 * cluster lock held.
 *
 * The dir semaphore will protect us from having to worry about
 * concurrent processes on our node trying to attach a lock at the
 * same time.
 *
 * The dir cluster lock (held at either PR or EX mode) protects us
 * from unlink and rename on other nodes.
 *
 * A dput() can happen asynchronously due to pruning, so we cover
 * attaching and detaching the dentry lock with a
 * dentry_attach_lock.
 *
 * A node which has done lookup on a name retains a protected read
 * lock until final dput. If the user requests an unlink or rename,
 * the protected read is upgraded to an exclusive lock. Other nodes
 * who have seen the dentry will then be informed that they need to
 * downgrade their lock, which will involve d_delete on the
 * dentry. This happens in ocfs2_dentry_convert_worker().
 */
int ocfs2_dentry_attach_lock(struct dentry *dentry,
			     struct inode *inode,
			     u64 parent_blkno)
{
	int ret;
	struct dentry *alias;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n",
	     dentry->d_name.len, dentry->d_name.name,
	     (unsigned long long)parent_blkno, dl);

	/*
	 * Negative dentry. We ignore these for now.
	 *
	 * XXX: Could we improve ocfs2_dentry_revalidate() by
	 * tracking these?
	 */
	if (!inode)
		return 0;

	if (!dentry->d_inode && dentry->d_fsdata) {
		/* Converting a negative dentry to positive.
		   Clear dentry->d_fsdata. */
		dentry->d_fsdata = dl = NULL;
	}

	if (dl) {
		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%.*s\": old parent: %llu, new: %llu\n",
				dentry->d_name.len, dentry->d_name.name,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);
		return 0;
	}

	alias = ocfs2_find_local_alias(inode, parent_blkno, 0);
	if (alias) {
		/*
		 * Great, an alias exists, which means we must have a
		 * dentry lock already. We can just grab the lock off
		 * the alias and add it to the list.
		 *
		 * We're depending here on the fact that this dentry
		 * was found and exists in the dcache and so must have
		 * a reference to the dentry_lock because we can't
		 * race creates. Final dput() cannot happen on it
		 * since we have it pinned, so our reference is safe.
		 */
		dl = alias->d_fsdata;
		mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n",
				(unsigned long long)parent_blkno,
				(unsigned long long)OCFS2_I(inode)->ip_blkno);

		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%.*s\": old parent: %llu, new: %llu\n",
				dentry->d_name.len, dentry->d_name.name,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);

		mlog(0, "Found: %s\n", dl->dl_lockres.l_name);

		goto out_attach;
	}

	/*
	 * There are no other aliases
	 */
	dl = kmalloc(sizeof(*dl), GFP_NOFS);
	if (!dl) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}

	dl->dl_count = 0;
	/*
	 * Does this have to happen below, for all attaches, in case
	 * the struct inode gets blown away by the downconvert thread?
	 */
	dl->dl_inode = igrab(inode);
	dl->dl_parent_blkno = parent_blkno;
	ocfs2_dentry_lock_res_init(dl, parent_blkno, inode);

out_attach:
	spin_lock(&dentry_attach_lock);
	dentry->d_fsdata = dl;
	dl->dl_count++;
	spin_unlock(&dentry_attach_lock);

	/*
	 * This actually gets us our PRMODE level lock. From now on,
	 * we'll have a notification if one of these names is
	 * destroyed on another node.
	 */
	ret = ocfs2_dentry_lock(dentry, 0);
	if (!ret)
		ocfs2_dentry_unlock(dentry, 0);
	else
		mlog_errno(ret);

	/*
	 * In case of error, manually free the allocation and do the
	 * iput(). We need to do this because error here means no
	 * d_instantiate(), which means iput() will not be called
	 * during dput(dentry).
	 */
	if (ret < 0 && !alias) {
		ocfs2_lock_res_free(&dl->dl_lockres);
		BUG_ON(dl->dl_count != 1);
		spin_lock(&dentry_attach_lock);
		dentry->d_fsdata = NULL;
		spin_unlock(&dentry_attach_lock);
		kfree(dl);
		iput(inode);
	}

	dput(alias);

	return ret;
}
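/*
 * A minimal sketch of the expected call site (assuming a lookup path
 * like ocfs2_lookup() in namei.c, which already holds the parent's
 * i_mutex and a cluster lock on the parent directory; illustrative
 * only):
 *
 *	status = ocfs2_dentry_attach_lock(dentry, inode,
 *					  OCFS2_I(dir)->ip_blkno);
 *	if (status)
 *		mlog_errno(status);
 */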
DEFINE_SPINLOCK(dentry_list_lock);

/* We limit the number of dentry locks to drop in one go. We have
 * this limit so that we don't starve other users of ocfs2_wq. */
#define DL_INODE_DROP_COUNT 64

/* Drop inode references from dentry locks */
static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count)
{
	struct ocfs2_dentry_lock *dl;

	spin_lock(&dentry_list_lock);
	while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) {
		dl = osb->dentry_lock_list;
		osb->dentry_lock_list = dl->dl_next;
		spin_unlock(&dentry_list_lock);
		iput(dl->dl_inode);
		kfree(dl);
		spin_lock(&dentry_list_lock);
	}
	spin_unlock(&dentry_list_lock);
}

void ocfs2_drop_dl_inodes(struct work_struct *work)
{
	struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
					       dentry_lock_work);

	__ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT);
	/*
	 * Don't queue dropping if umount is in progress. We flush the
	 * list in ocfs2_dismount_volume().
	 */
	spin_lock(&dentry_list_lock);
	if (osb->dentry_lock_list &&
	    !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
		queue_work(ocfs2_wq, &osb->dentry_lock_work);
	spin_unlock(&dentry_list_lock);
}

/* Flush the whole work queue */
void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb)
{
	__ocfs2_drop_dl_inodes(osb, -1);
}

/*
 * ocfs2_dentry_iput() and friends.
 *
 * At this point, our particular dentry is detached from the inode's
 * alias list, so there's no way that the locking code can find it.
 *
 * The interesting stuff happens when we determine that our lock needs
 * to go away because this is the last subdir alias in the
 * system. This function needs to handle a couple of things:
 *
 * 1) Synchronizing lock shutdown with the downconvert threads. This
 *    is already handled for us via the lockres release drop function
 *    called in ocfs2_release_dentry_lock()
 *
 * 2) A race may occur when we're doing our lock shutdown and
 *    another process wants to create a new dentry lock. Right now we
 *    let them race, which means that for a very short while, this
 *    node might have two locks on a lock resource. This shouldn't be
 *    a problem though, because one of them is in the process of being
 *    thrown out.
 */
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
				   struct ocfs2_dentry_lock *dl)
{
	ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
	ocfs2_lock_res_free(&dl->dl_lockres);

	/* We leave dropping of the inode reference to ocfs2_wq, as
	 * that can possibly lead to inode deletion which gets tricky */
	spin_lock(&dentry_list_lock);
	if (!osb->dentry_lock_list &&
	    !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
		queue_work(ocfs2_wq, &osb->dentry_lock_work);
	dl->dl_next = osb->dentry_lock_list;
	osb->dentry_lock_list = dl;
	spin_unlock(&dentry_list_lock);
}

void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
			   struct ocfs2_dentry_lock *dl)
{
	int unlock;

	BUG_ON(dl->dl_count == 0);

	spin_lock(&dentry_attach_lock);
	dl->dl_count--;
	unlock = !dl->dl_count;
	spin_unlock(&dentry_attach_lock);

	if (unlock)
		ocfs2_drop_dentry_lock(osb, dl);
}
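/*
 * Summary of the dl_count lifecycle implemented above (descriptive
 * only): ocfs2_dentry_attach_lock() takes a reference under
 * dentry_attach_lock for every dentry sharing the lock, and
 * ocfs2_dentry_lock_put() drops one. The final put hands the lock to
 * ocfs2_drop_dentry_lock(), which frees the lockres immediately but
 * defers the iput() of dl_inode to ocfs2_wq via
 * osb->dentry_lock_list, since that iput() can trigger inode
 * deletion, which gets tricky in this context.
 */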
static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
{
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	if (!dl) {
		/*
		 * No dentry lock is ok if we're disconnected or
		 * unhashed.
		 */
		if (!(dentry->d_flags & DCACHE_DISCONNECTED) &&
		    !d_unhashed(dentry)) {
			unsigned long long ino = 0ULL;
			if (inode)
				ino = (unsigned long long)OCFS2_I(inode)->ip_blkno;
			mlog(ML_ERROR, "Dentry is missing cluster lock. "
			     "inode: %llu, d_flags: 0x%x, d_name: %.*s\n",
			     ino, dentry->d_flags, dentry->d_name.len,
			     dentry->d_name.name);
		}

		goto out;
	}

	mlog_bug_on_msg(dl->dl_count == 0, "dentry: %.*s, count: %u\n",
			dentry->d_name.len, dentry->d_name.name,
			dl->dl_count);

	ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl);

out:
	iput(inode);
}

/*
 * d_move(), but keep the locks in sync.
 *
 * When we are done, "dentry" will have the parent dir and name of
 * "target", which will be thrown away.
 *
 * We manually update the lock of "dentry" if need be.
 *
 * "target" doesn't have its dentry lock touched - we allow the later
 * dput() to handle this for us.
 *
 * This is called during ocfs2_rename(), while holding parent
 * directory locks. The dentries have already been deleted on other
 * nodes via ocfs2_remote_dentry_delete().
 *
 * Normally, the VFS handles the d_move() for the file system, after
 * the ->rename() callback. OCFS2 wants to handle this internally, so
 * the new lock can be created atomically with respect to the cluster.
 */
void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
		       struct inode *old_dir, struct inode *new_dir)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
	struct inode *inode = dentry->d_inode;

	/*
	 * Move within the same directory, so the actual lock info won't
	 * change.
	 *
	 * XXX: Is there any advantage to dropping the lock here?
	 */
	if (old_dir == new_dir)
		goto out_move;

	ocfs2_dentry_lock_put(osb, dentry->d_fsdata);

	dentry->d_fsdata = NULL;
	ret = ocfs2_dentry_attach_lock(dentry, inode,
				       OCFS2_I(new_dir)->ip_blkno);
	if (ret)
		mlog_errno(ret);

out_move:
	d_move(dentry, target);
}

const struct dentry_operations ocfs2_dentry_ops = {
	.d_revalidate	= ocfs2_dentry_revalidate,
	.d_iput		= ocfs2_dentry_iput,
};
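/*
 * A minimal sketch of how these operations get wired up; the exact
 * mechanism lives outside this file and depends on the kernel version
 * (an assumption, illustrative only):
 *
 *	dentry->d_op = &ocfs2_dentry_ops;	// per-dentry, in lookup
 *
 * or, on kernels that support a superblock-wide default:
 *
 *	sb->s_d_op = &ocfs2_dentry_ops;		// once, at mount time
 */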