1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/spinlock.h> 4 #include <linux/fs_struct.h> 5 #include <linux/namei.h> 6 #include <linux/slab.h> 7 #include <linux/sched.h> 8 9 #include "super.h" 10 #include "mds_client.h" 11 12 /* 13 * Directory operations: readdir, lookup, create, link, unlink, 14 * rename, etc. 15 */ 16 17 /* 18 * Ceph MDS operations are specified in terms of a base ino and 19 * relative path. Thus, the client can specify an operation on a 20 * specific inode (e.g., a getattr due to fstat(2)), or as a path 21 * relative to, say, the root directory. 22 * 23 * Normally, we limit ourselves to strict inode ops (no path component) 24 * or dentry operations (a single path component relative to an ino). The 25 * exception to this is open_root_dentry(), which will open the mount 26 * point by name. 27 */ 28 29 const struct dentry_operations ceph_dentry_ops; 30 31 /* 32 * Initialize ceph dentry state. 33 */ 34 int ceph_init_dentry(struct dentry *dentry) 35 { 36 struct ceph_dentry_info *di; 37 38 if (dentry->d_fsdata) 39 return 0; 40 41 di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO); 42 if (!di) 43 return -ENOMEM; /* oh well */ 44 45 spin_lock(&dentry->d_lock); 46 if (dentry->d_fsdata) { 47 /* lost a race */ 48 kmem_cache_free(ceph_dentry_cachep, di); 49 goto out_unlock; 50 } 51 52 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) 53 d_set_d_op(dentry, &ceph_dentry_ops); 54 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR) 55 d_set_d_op(dentry, &ceph_snapdir_dentry_ops); 56 else 57 d_set_d_op(dentry, &ceph_snap_dentry_ops); 58 59 di->dentry = dentry; 60 di->lease_session = NULL; 61 dentry->d_time = jiffies; 62 /* avoid reordering d_fsdata setup so that the check above is safe */ 63 smp_mb(); 64 dentry->d_fsdata = di; 65 ceph_dentry_lru_add(dentry); 66 out_unlock: 67 spin_unlock(&dentry->d_lock); 68 return 0; 69 } 70 71 struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry) 72 { 73 struct inode *inode = NULL; 74 75 if (!dentry) 76 return NULL; 77 78 spin_lock(&dentry->d_lock); 79 if (!IS_ROOT(dentry)) { 80 inode = dentry->d_parent->d_inode; 81 ihold(inode); 82 } 83 spin_unlock(&dentry->d_lock); 84 return inode; 85 } 86 87 88 /* 89 * for readdir, we encode the directory frag and offset within that 90 * frag into f_pos. 91 */ 92 static unsigned fpos_frag(loff_t p) 93 { 94 return p >> 32; 95 } 96 static unsigned fpos_off(loff_t p) 97 { 98 return p & 0xffffffff; 99 } 100 101 static int fpos_cmp(loff_t l, loff_t r) 102 { 103 int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r)); 104 if (v) 105 return v; 106 return (int)(fpos_off(l) - fpos_off(r)); 107 } 108 109 /* 110 * When possible, we try to satisfy a readdir by peeking at the 111 * dcache. We make this work by carefully ordering dentries on 112 * d_child when we initially get results back from the MDS, and 113 * falling back to a "normal" sync readdir if any dentries in the dir 114 * are dropped. 115 * 116 * Complete dir indicates that we have all dentries in the dir. It is 117 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by 118 * the MDS if/when the directory is modified). 119 */ 120 static int __dcache_readdir(struct file *file, struct dir_context *ctx, 121 u32 shared_gen) 122 { 123 struct ceph_file_info *fi = file->private_data; 124 struct dentry *parent = file->f_path.dentry; 125 struct inode *dir = parent->d_inode; 126 struct list_head *p; 127 struct dentry *dentry, *last; 128 struct ceph_dentry_info *di; 129 int err = 0; 130 131 /* claim ref on last dentry we returned */ 132 last = fi->dentry; 133 fi->dentry = NULL; 134 135 dout("__dcache_readdir %p v%u at %llu (last %p)\n", 136 dir, shared_gen, ctx->pos, last); 137 138 spin_lock(&parent->d_lock); 139 140 /* start at beginning? */ 141 if (ctx->pos == 2 || last == NULL || 142 fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) { 143 if (list_empty(&parent->d_subdirs)) 144 goto out_unlock; 145 p = parent->d_subdirs.prev; 146 dout(" initial p %p/%p\n", p->prev, p->next); 147 } else { 148 p = last->d_child.prev; 149 } 150 151 more: 152 dentry = list_entry(p, struct dentry, d_child); 153 di = ceph_dentry(dentry); 154 while (1) { 155 dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next, 156 d_unhashed(dentry) ? "!hashed" : "hashed", 157 parent->d_subdirs.prev, parent->d_subdirs.next); 158 if (p == &parent->d_subdirs) { 159 fi->flags |= CEPH_F_ATEND; 160 goto out_unlock; 161 } 162 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); 163 if (di->lease_shared_gen == shared_gen && 164 !d_unhashed(dentry) && dentry->d_inode && 165 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && 166 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && 167 fpos_cmp(ctx->pos, di->offset) <= 0) 168 break; 169 dout(" skipping %p %pd at %llu (%llu)%s%s\n", dentry, 170 dentry, di->offset, 171 ctx->pos, d_unhashed(dentry) ? " unhashed" : "", 172 !dentry->d_inode ? " null" : ""); 173 spin_unlock(&dentry->d_lock); 174 p = p->prev; 175 dentry = list_entry(p, struct dentry, d_child); 176 di = ceph_dentry(dentry); 177 } 178 179 dget_dlock(dentry); 180 spin_unlock(&dentry->d_lock); 181 spin_unlock(&parent->d_lock); 182 183 /* make sure a dentry wasn't dropped while we didn't have parent lock */ 184 if (!ceph_dir_is_complete_ordered(dir)) { 185 dout(" lost dir complete on %p; falling back to mds\n", dir); 186 dput(dentry); 187 err = -EAGAIN; 188 goto out; 189 } 190 191 dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos, 192 dentry, dentry, dentry->d_inode); 193 if (!dir_emit(ctx, dentry->d_name.name, 194 dentry->d_name.len, 195 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino), 196 dentry->d_inode->i_mode >> 12)) { 197 if (last) { 198 /* remember our position */ 199 fi->dentry = last; 200 fi->next_offset = fpos_off(di->offset); 201 } 202 dput(dentry); 203 return 0; 204 } 205 206 ctx->pos = di->offset + 1; 207 208 if (last) 209 dput(last); 210 last = dentry; 211 212 spin_lock(&parent->d_lock); 213 p = p->prev; /* advance to next dentry */ 214 goto more; 215 216 out_unlock: 217 spin_unlock(&parent->d_lock); 218 out: 219 if (last) 220 dput(last); 221 return err; 222 } 223 224 /* 225 * make note of the last dentry we read, so we can 226 * continue at the same lexicographical point, 227 * regardless of what dir changes take place on the 228 * server. 229 */ 230 static int note_last_dentry(struct ceph_file_info *fi, const char *name, 231 int len) 232 { 233 kfree(fi->last_name); 234 fi->last_name = kmalloc(len+1, GFP_NOFS); 235 if (!fi->last_name) 236 return -ENOMEM; 237 memcpy(fi->last_name, name, len); 238 fi->last_name[len] = 0; 239 dout("note_last_dentry '%s'\n", fi->last_name); 240 return 0; 241 } 242 243 static int ceph_readdir(struct file *file, struct dir_context *ctx) 244 { 245 struct ceph_file_info *fi = file->private_data; 246 struct inode *inode = file_inode(file); 247 struct ceph_inode_info *ci = ceph_inode(inode); 248 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 249 struct ceph_mds_client *mdsc = fsc->mdsc; 250 unsigned frag = fpos_frag(ctx->pos); 251 int off = fpos_off(ctx->pos); 252 int err; 253 u32 ftype; 254 struct ceph_mds_reply_info_parsed *rinfo; 255 256 dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off); 257 if (fi->flags & CEPH_F_ATEND) 258 return 0; 259 260 /* always start with . and .. */ 261 if (ctx->pos == 0) { 262 dout("readdir off 0 -> '.'\n"); 263 if (!dir_emit(ctx, ".", 1, 264 ceph_translate_ino(inode->i_sb, inode->i_ino), 265 inode->i_mode >> 12)) 266 return 0; 267 ctx->pos = 1; 268 off = 1; 269 } 270 if (ctx->pos == 1) { 271 ino_t ino = parent_ino(file->f_path.dentry); 272 dout("readdir off 1 -> '..'\n"); 273 if (!dir_emit(ctx, "..", 2, 274 ceph_translate_ino(inode->i_sb, ino), 275 inode->i_mode >> 12)) 276 return 0; 277 ctx->pos = 2; 278 off = 2; 279 } 280 281 /* can we use the dcache? */ 282 spin_lock(&ci->i_ceph_lock); 283 if ((ctx->pos == 2 || fi->dentry) && 284 ceph_test_mount_opt(fsc, DCACHE) && 285 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && 286 ceph_snap(inode) != CEPH_SNAPDIR && 287 __ceph_dir_is_complete_ordered(ci) && 288 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { 289 u32 shared_gen = ci->i_shared_gen; 290 spin_unlock(&ci->i_ceph_lock); 291 err = __dcache_readdir(file, ctx, shared_gen); 292 if (err != -EAGAIN) 293 return err; 294 frag = fpos_frag(ctx->pos); 295 off = fpos_off(ctx->pos); 296 } else { 297 spin_unlock(&ci->i_ceph_lock); 298 } 299 if (fi->dentry) { 300 err = note_last_dentry(fi, fi->dentry->d_name.name, 301 fi->dentry->d_name.len); 302 if (err) 303 return err; 304 dput(fi->dentry); 305 fi->dentry = NULL; 306 } 307 308 /* proceed with a normal readdir */ 309 310 if (ctx->pos == 2) { 311 /* note dir version at start of readdir so we can tell 312 * if any dentries get dropped */ 313 fi->dir_release_count = atomic_read(&ci->i_release_count); 314 fi->dir_ordered_count = ci->i_ordered_count; 315 } 316 317 more: 318 /* do we have the correct frag content buffered? */ 319 if (fi->frag != frag || fi->last_readdir == NULL) { 320 struct ceph_mds_request *req; 321 int op = ceph_snap(inode) == CEPH_SNAPDIR ? 322 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR; 323 324 /* discard old result, if any */ 325 if (fi->last_readdir) { 326 ceph_mdsc_put_request(fi->last_readdir); 327 fi->last_readdir = NULL; 328 } 329 330 dout("readdir fetching %llx.%llx frag %x offset '%s'\n", 331 ceph_vinop(inode), frag, fi->last_name); 332 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 333 if (IS_ERR(req)) 334 return PTR_ERR(req); 335 err = ceph_alloc_readdir_reply_buffer(req, inode); 336 if (err) { 337 ceph_mdsc_put_request(req); 338 return err; 339 } 340 req->r_inode = inode; 341 ihold(inode); 342 req->r_dentry = dget(file->f_path.dentry); 343 /* hints to request -> mds selection code */ 344 req->r_direct_mode = USE_AUTH_MDS; 345 req->r_direct_hash = ceph_frag_value(frag); 346 req->r_direct_is_hash = true; 347 req->r_path2 = kstrdup(fi->last_name, GFP_NOFS); 348 req->r_readdir_offset = fi->next_offset; 349 req->r_args.readdir.frag = cpu_to_le32(frag); 350 err = ceph_mdsc_do_request(mdsc, NULL, req); 351 if (err < 0) { 352 ceph_mdsc_put_request(req); 353 return err; 354 } 355 dout("readdir got and parsed readdir result=%d" 356 " on frag %x, end=%d, complete=%d\n", err, frag, 357 (int)req->r_reply_info.dir_end, 358 (int)req->r_reply_info.dir_complete); 359 360 if (!req->r_did_prepopulate) { 361 dout("readdir !did_prepopulate"); 362 /* preclude from marking dir complete */ 363 fi->dir_release_count--; 364 } 365 366 /* note next offset and last dentry name */ 367 rinfo = &req->r_reply_info; 368 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) { 369 frag = le32_to_cpu(rinfo->dir_dir->frag); 370 if (ceph_frag_is_leftmost(frag)) 371 fi->next_offset = 2; 372 else 373 fi->next_offset = 0; 374 off = fi->next_offset; 375 } 376 fi->frag = frag; 377 fi->offset = fi->next_offset; 378 fi->last_readdir = req; 379 380 if (req->r_reply_info.dir_end) { 381 kfree(fi->last_name); 382 fi->last_name = NULL; 383 if (ceph_frag_is_rightmost(frag)) 384 fi->next_offset = 2; 385 else 386 fi->next_offset = 0; 387 } else { 388 err = note_last_dentry(fi, 389 rinfo->dir_dname[rinfo->dir_nr-1], 390 rinfo->dir_dname_len[rinfo->dir_nr-1]); 391 if (err) 392 return err; 393 fi->next_offset += rinfo->dir_nr; 394 } 395 } 396 397 rinfo = &fi->last_readdir->r_reply_info; 398 dout("readdir frag %x num %d off %d chunkoff %d\n", frag, 399 rinfo->dir_nr, off, fi->offset); 400 401 ctx->pos = ceph_make_fpos(frag, off); 402 while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) { 403 struct ceph_mds_reply_inode *in = 404 rinfo->dir_in[off - fi->offset].in; 405 struct ceph_vino vino; 406 ino_t ino; 407 408 dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n", 409 off, off - fi->offset, rinfo->dir_nr, ctx->pos, 410 rinfo->dir_dname_len[off - fi->offset], 411 rinfo->dir_dname[off - fi->offset], in); 412 BUG_ON(!in); 413 ftype = le32_to_cpu(in->mode) >> 12; 414 vino.ino = le64_to_cpu(in->ino); 415 vino.snap = le64_to_cpu(in->snapid); 416 ino = ceph_vino_to_ino(vino); 417 if (!dir_emit(ctx, 418 rinfo->dir_dname[off - fi->offset], 419 rinfo->dir_dname_len[off - fi->offset], 420 ceph_translate_ino(inode->i_sb, ino), ftype)) { 421 dout("filldir stopping us...\n"); 422 return 0; 423 } 424 off++; 425 ctx->pos++; 426 } 427 428 if (fi->last_name) { 429 ceph_mdsc_put_request(fi->last_readdir); 430 fi->last_readdir = NULL; 431 goto more; 432 } 433 434 /* more frags? */ 435 if (!ceph_frag_is_rightmost(frag)) { 436 frag = ceph_frag_next(frag); 437 off = 0; 438 ctx->pos = ceph_make_fpos(frag, off); 439 dout("readdir next frag is %x\n", frag); 440 goto more; 441 } 442 fi->flags |= CEPH_F_ATEND; 443 444 /* 445 * if dir_release_count still matches the dir, no dentries 446 * were released during the whole readdir, and we should have 447 * the complete dir contents in our cache. 448 */ 449 spin_lock(&ci->i_ceph_lock); 450 if (atomic_read(&ci->i_release_count) == fi->dir_release_count) { 451 if (ci->i_ordered_count == fi->dir_ordered_count) 452 dout(" marking %p complete and ordered\n", inode); 453 else 454 dout(" marking %p complete\n", inode); 455 __ceph_dir_set_complete(ci, fi->dir_release_count, 456 fi->dir_ordered_count); 457 } 458 spin_unlock(&ci->i_ceph_lock); 459 460 dout("readdir %p file %p done.\n", inode, file); 461 return 0; 462 } 463 464 static void reset_readdir(struct ceph_file_info *fi, unsigned frag) 465 { 466 if (fi->last_readdir) { 467 ceph_mdsc_put_request(fi->last_readdir); 468 fi->last_readdir = NULL; 469 } 470 kfree(fi->last_name); 471 fi->last_name = NULL; 472 if (ceph_frag_is_leftmost(frag)) 473 fi->next_offset = 2; /* compensate for . and .. */ 474 else 475 fi->next_offset = 0; 476 if (fi->dentry) { 477 dput(fi->dentry); 478 fi->dentry = NULL; 479 } 480 fi->flags &= ~CEPH_F_ATEND; 481 } 482 483 static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence) 484 { 485 struct ceph_file_info *fi = file->private_data; 486 struct inode *inode = file->f_mapping->host; 487 loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset); 488 loff_t retval; 489 490 mutex_lock(&inode->i_mutex); 491 retval = -EINVAL; 492 switch (whence) { 493 case SEEK_END: 494 offset += inode->i_size + 2; /* FIXME */ 495 break; 496 case SEEK_CUR: 497 offset += file->f_pos; 498 case SEEK_SET: 499 break; 500 default: 501 goto out; 502 } 503 504 if (offset >= 0) { 505 if (offset != file->f_pos) { 506 file->f_pos = offset; 507 file->f_version = 0; 508 fi->flags &= ~CEPH_F_ATEND; 509 } 510 retval = offset; 511 512 /* 513 * discard buffered readdir content on seekdir(0), or 514 * seek to new frag, or seek prior to current chunk. 515 */ 516 if (offset == 0 || 517 fpos_frag(offset) != fi->frag || 518 fpos_off(offset) < fi->offset) { 519 dout("dir_llseek dropping %p content\n", file); 520 reset_readdir(fi, fpos_frag(offset)); 521 } 522 523 /* bump dir_release_count if we did a forward seek */ 524 if (fpos_cmp(offset, old_offset) > 0) 525 fi->dir_release_count--; 526 } 527 out: 528 mutex_unlock(&inode->i_mutex); 529 return retval; 530 } 531 532 /* 533 * Handle lookups for the hidden .snap directory. 534 */ 535 int ceph_handle_snapdir(struct ceph_mds_request *req, 536 struct dentry *dentry, int err) 537 { 538 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); 539 struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */ 540 541 /* .snap dir? */ 542 if (err == -ENOENT && 543 ceph_snap(parent) == CEPH_NOSNAP && 544 strcmp(dentry->d_name.name, 545 fsc->mount_options->snapdir_name) == 0) { 546 struct inode *inode = ceph_get_snapdir(parent); 547 dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n", 548 dentry, dentry, inode); 549 BUG_ON(!d_unhashed(dentry)); 550 d_add(dentry, inode); 551 err = 0; 552 } 553 return err; 554 } 555 556 /* 557 * Figure out final result of a lookup/open request. 558 * 559 * Mainly, make sure we return the final req->r_dentry (if it already 560 * existed) in place of the original VFS-provided dentry when they 561 * differ. 562 * 563 * Gracefully handle the case where the MDS replies with -ENOENT and 564 * no trace (which it may do, at its discretion, e.g., if it doesn't 565 * care to issue a lease on the negative dentry). 566 */ 567 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, 568 struct dentry *dentry, int err) 569 { 570 if (err == -ENOENT) { 571 /* no trace? */ 572 err = 0; 573 if (!req->r_reply_info.head->is_dentry) { 574 dout("ENOENT and no trace, dentry %p inode %p\n", 575 dentry, dentry->d_inode); 576 if (dentry->d_inode) { 577 d_drop(dentry); 578 err = -ENOENT; 579 } else { 580 d_add(dentry, NULL); 581 } 582 } 583 } 584 if (err) 585 dentry = ERR_PTR(err); 586 else if (dentry != req->r_dentry) 587 dentry = dget(req->r_dentry); /* we got spliced */ 588 else 589 dentry = NULL; 590 return dentry; 591 } 592 593 static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry) 594 { 595 return ceph_ino(inode) == CEPH_INO_ROOT && 596 strncmp(dentry->d_name.name, ".ceph", 5) == 0; 597 } 598 599 /* 600 * Look up a single dir entry. If there is a lookup intent, inform 601 * the MDS so that it gets our 'caps wanted' value in a single op. 602 */ 603 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, 604 unsigned int flags) 605 { 606 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 607 struct ceph_mds_client *mdsc = fsc->mdsc; 608 struct ceph_mds_request *req; 609 int op; 610 int err; 611 612 dout("lookup %p dentry %p '%pd'\n", 613 dir, dentry, dentry); 614 615 if (dentry->d_name.len > NAME_MAX) 616 return ERR_PTR(-ENAMETOOLONG); 617 618 err = ceph_init_dentry(dentry); 619 if (err < 0) 620 return ERR_PTR(err); 621 622 /* can we conclude ENOENT locally? */ 623 if (dentry->d_inode == NULL) { 624 struct ceph_inode_info *ci = ceph_inode(dir); 625 struct ceph_dentry_info *di = ceph_dentry(dentry); 626 627 spin_lock(&ci->i_ceph_lock); 628 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags); 629 if (strncmp(dentry->d_name.name, 630 fsc->mount_options->snapdir_name, 631 dentry->d_name.len) && 632 !is_root_ceph_dentry(dir, dentry) && 633 ceph_test_mount_opt(fsc, DCACHE) && 634 __ceph_dir_is_complete(ci) && 635 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { 636 spin_unlock(&ci->i_ceph_lock); 637 dout(" dir %p complete, -ENOENT\n", dir); 638 d_add(dentry, NULL); 639 di->lease_shared_gen = ci->i_shared_gen; 640 return NULL; 641 } 642 spin_unlock(&ci->i_ceph_lock); 643 } 644 645 op = ceph_snap(dir) == CEPH_SNAPDIR ? 646 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP; 647 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS); 648 if (IS_ERR(req)) 649 return ERR_CAST(req); 650 req->r_dentry = dget(dentry); 651 req->r_num_caps = 2; 652 /* we only need inode linkage */ 653 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); 654 req->r_locked_dir = dir; 655 err = ceph_mdsc_do_request(mdsc, NULL, req); 656 err = ceph_handle_snapdir(req, dentry, err); 657 dentry = ceph_finish_lookup(req, dentry, err); 658 ceph_mdsc_put_request(req); /* will dput(dentry) */ 659 dout("lookup result=%p\n", dentry); 660 return dentry; 661 } 662 663 /* 664 * If we do a create but get no trace back from the MDS, follow up with 665 * a lookup (the VFS expects us to link up the provided dentry). 666 */ 667 int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry) 668 { 669 struct dentry *result = ceph_lookup(dir, dentry, 0); 670 671 if (result && !IS_ERR(result)) { 672 /* 673 * We created the item, then did a lookup, and found 674 * it was already linked to another inode we already 675 * had in our cache (and thus got spliced). To not 676 * confuse VFS (especially when inode is a directory), 677 * we don't link our dentry to that inode, return an 678 * error instead. 679 * 680 * This event should be rare and it happens only when 681 * we talk to old MDS. Recent MDS does not send traceless 682 * reply for request that creates new inode. 683 */ 684 d_drop(result); 685 return -ESTALE; 686 } 687 return PTR_ERR(result); 688 } 689 690 static int ceph_mknod(struct inode *dir, struct dentry *dentry, 691 umode_t mode, dev_t rdev) 692 { 693 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 694 struct ceph_mds_client *mdsc = fsc->mdsc; 695 struct ceph_mds_request *req; 696 struct ceph_acls_info acls = {}; 697 int err; 698 699 if (ceph_snap(dir) != CEPH_NOSNAP) 700 return -EROFS; 701 702 err = ceph_pre_init_acls(dir, &mode, &acls); 703 if (err < 0) 704 return err; 705 706 dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n", 707 dir, dentry, mode, rdev); 708 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS); 709 if (IS_ERR(req)) { 710 err = PTR_ERR(req); 711 goto out; 712 } 713 req->r_dentry = dget(dentry); 714 req->r_num_caps = 2; 715 req->r_locked_dir = dir; 716 req->r_args.mknod.mode = cpu_to_le32(mode); 717 req->r_args.mknod.rdev = cpu_to_le32(rdev); 718 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 719 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 720 if (acls.pagelist) { 721 req->r_pagelist = acls.pagelist; 722 acls.pagelist = NULL; 723 } 724 err = ceph_mdsc_do_request(mdsc, dir, req); 725 if (!err && !req->r_reply_info.head->is_dentry) 726 err = ceph_handle_notrace_create(dir, dentry); 727 ceph_mdsc_put_request(req); 728 out: 729 if (!err) 730 ceph_init_inode_acls(dentry->d_inode, &acls); 731 else 732 d_drop(dentry); 733 ceph_release_acls_info(&acls); 734 return err; 735 } 736 737 static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode, 738 bool excl) 739 { 740 return ceph_mknod(dir, dentry, mode, 0); 741 } 742 743 static int ceph_symlink(struct inode *dir, struct dentry *dentry, 744 const char *dest) 745 { 746 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 747 struct ceph_mds_client *mdsc = fsc->mdsc; 748 struct ceph_mds_request *req; 749 int err; 750 751 if (ceph_snap(dir) != CEPH_NOSNAP) 752 return -EROFS; 753 754 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest); 755 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS); 756 if (IS_ERR(req)) { 757 err = PTR_ERR(req); 758 goto out; 759 } 760 req->r_dentry = dget(dentry); 761 req->r_num_caps = 2; 762 req->r_path2 = kstrdup(dest, GFP_NOFS); 763 req->r_locked_dir = dir; 764 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 765 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 766 err = ceph_mdsc_do_request(mdsc, dir, req); 767 if (!err && !req->r_reply_info.head->is_dentry) 768 err = ceph_handle_notrace_create(dir, dentry); 769 ceph_mdsc_put_request(req); 770 out: 771 if (err) 772 d_drop(dentry); 773 return err; 774 } 775 776 static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 777 { 778 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 779 struct ceph_mds_client *mdsc = fsc->mdsc; 780 struct ceph_mds_request *req; 781 struct ceph_acls_info acls = {}; 782 int err = -EROFS; 783 int op; 784 785 if (ceph_snap(dir) == CEPH_SNAPDIR) { 786 /* mkdir .snap/foo is a MKSNAP */ 787 op = CEPH_MDS_OP_MKSNAP; 788 dout("mksnap dir %p snap '%pd' dn %p\n", dir, 789 dentry, dentry); 790 } else if (ceph_snap(dir) == CEPH_NOSNAP) { 791 dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode); 792 op = CEPH_MDS_OP_MKDIR; 793 } else { 794 goto out; 795 } 796 797 mode |= S_IFDIR; 798 err = ceph_pre_init_acls(dir, &mode, &acls); 799 if (err < 0) 800 goto out; 801 802 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 803 if (IS_ERR(req)) { 804 err = PTR_ERR(req); 805 goto out; 806 } 807 808 req->r_dentry = dget(dentry); 809 req->r_num_caps = 2; 810 req->r_locked_dir = dir; 811 req->r_args.mkdir.mode = cpu_to_le32(mode); 812 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 813 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 814 if (acls.pagelist) { 815 req->r_pagelist = acls.pagelist; 816 acls.pagelist = NULL; 817 } 818 err = ceph_mdsc_do_request(mdsc, dir, req); 819 if (!err && 820 !req->r_reply_info.head->is_target && 821 !req->r_reply_info.head->is_dentry) 822 err = ceph_handle_notrace_create(dir, dentry); 823 ceph_mdsc_put_request(req); 824 out: 825 if (!err) 826 ceph_init_inode_acls(dentry->d_inode, &acls); 827 else 828 d_drop(dentry); 829 ceph_release_acls_info(&acls); 830 return err; 831 } 832 833 static int ceph_link(struct dentry *old_dentry, struct inode *dir, 834 struct dentry *dentry) 835 { 836 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 837 struct ceph_mds_client *mdsc = fsc->mdsc; 838 struct ceph_mds_request *req; 839 int err; 840 841 if (ceph_snap(dir) != CEPH_NOSNAP) 842 return -EROFS; 843 844 dout("link in dir %p old_dentry %p dentry %p\n", dir, 845 old_dentry, dentry); 846 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS); 847 if (IS_ERR(req)) { 848 d_drop(dentry); 849 return PTR_ERR(req); 850 } 851 req->r_dentry = dget(dentry); 852 req->r_num_caps = 2; 853 req->r_old_dentry = dget(old_dentry); 854 req->r_locked_dir = dir; 855 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 856 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 857 /* release LINK_SHARED on source inode (mds will lock it) */ 858 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED; 859 err = ceph_mdsc_do_request(mdsc, dir, req); 860 if (err) { 861 d_drop(dentry); 862 } else if (!req->r_reply_info.head->is_dentry) { 863 ihold(old_dentry->d_inode); 864 d_instantiate(dentry, old_dentry->d_inode); 865 } 866 ceph_mdsc_put_request(req); 867 return err; 868 } 869 870 /* 871 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it 872 * looks like the link count will hit 0, drop any other caps (other 873 * than PIN) we don't specifically want (due to the file still being 874 * open). 875 */ 876 static int drop_caps_for_unlink(struct inode *inode) 877 { 878 struct ceph_inode_info *ci = ceph_inode(inode); 879 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; 880 881 spin_lock(&ci->i_ceph_lock); 882 if (inode->i_nlink == 1) { 883 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); 884 ci->i_ceph_flags |= CEPH_I_NODELAY; 885 } 886 spin_unlock(&ci->i_ceph_lock); 887 return drop; 888 } 889 890 /* 891 * rmdir and unlink are differ only by the metadata op code 892 */ 893 static int ceph_unlink(struct inode *dir, struct dentry *dentry) 894 { 895 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb); 896 struct ceph_mds_client *mdsc = fsc->mdsc; 897 struct inode *inode = dentry->d_inode; 898 struct ceph_mds_request *req; 899 int err = -EROFS; 900 int op; 901 902 if (ceph_snap(dir) == CEPH_SNAPDIR) { 903 /* rmdir .snap/foo is RMSNAP */ 904 dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry); 905 op = CEPH_MDS_OP_RMSNAP; 906 } else if (ceph_snap(dir) == CEPH_NOSNAP) { 907 dout("unlink/rmdir dir %p dn %p inode %p\n", 908 dir, dentry, inode); 909 op = d_is_dir(dentry) ? 910 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK; 911 } else 912 goto out; 913 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 914 if (IS_ERR(req)) { 915 err = PTR_ERR(req); 916 goto out; 917 } 918 req->r_dentry = dget(dentry); 919 req->r_num_caps = 2; 920 req->r_locked_dir = dir; 921 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 922 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 923 req->r_inode_drop = drop_caps_for_unlink(inode); 924 err = ceph_mdsc_do_request(mdsc, dir, req); 925 if (!err && !req->r_reply_info.head->is_dentry) 926 d_delete(dentry); 927 ceph_mdsc_put_request(req); 928 out: 929 return err; 930 } 931 932 static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, 933 struct inode *new_dir, struct dentry *new_dentry) 934 { 935 struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb); 936 struct ceph_mds_client *mdsc = fsc->mdsc; 937 struct ceph_mds_request *req; 938 int err; 939 940 if (ceph_snap(old_dir) != ceph_snap(new_dir)) 941 return -EXDEV; 942 if (ceph_snap(old_dir) != CEPH_NOSNAP || 943 ceph_snap(new_dir) != CEPH_NOSNAP) 944 return -EROFS; 945 dout("rename dir %p dentry %p to dir %p dentry %p\n", 946 old_dir, old_dentry, new_dir, new_dentry); 947 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS); 948 if (IS_ERR(req)) 949 return PTR_ERR(req); 950 ihold(old_dir); 951 req->r_dentry = dget(new_dentry); 952 req->r_num_caps = 2; 953 req->r_old_dentry = dget(old_dentry); 954 req->r_old_dentry_dir = old_dir; 955 req->r_locked_dir = new_dir; 956 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED; 957 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL; 958 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 959 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 960 /* release LINK_RDCACHE on source inode (mds will lock it) */ 961 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED; 962 if (new_dentry->d_inode) 963 req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode); 964 err = ceph_mdsc_do_request(mdsc, old_dir, req); 965 if (!err && !req->r_reply_info.head->is_dentry) { 966 /* 967 * Normally d_move() is done by fill_trace (called by 968 * do_request, above). If there is no trace, we need 969 * to do it here. 970 */ 971 972 d_move(old_dentry, new_dentry); 973 974 /* ensure target dentry is invalidated, despite 975 rehashing bug in vfs_rename_dir */ 976 ceph_invalidate_dentry_lease(new_dentry); 977 978 /* d_move screws up sibling dentries' offsets */ 979 ceph_dir_clear_complete(old_dir); 980 ceph_dir_clear_complete(new_dir); 981 982 } 983 ceph_mdsc_put_request(req); 984 return err; 985 } 986 987 /* 988 * Ensure a dentry lease will no longer revalidate. 989 */ 990 void ceph_invalidate_dentry_lease(struct dentry *dentry) 991 { 992 spin_lock(&dentry->d_lock); 993 dentry->d_time = jiffies; 994 ceph_dentry(dentry)->lease_shared_gen = 0; 995 spin_unlock(&dentry->d_lock); 996 } 997 998 /* 999 * Check if dentry lease is valid. If not, delete the lease. Try to 1000 * renew if the least is more than half up. 1001 */ 1002 static int dentry_lease_is_valid(struct dentry *dentry) 1003 { 1004 struct ceph_dentry_info *di; 1005 struct ceph_mds_session *s; 1006 int valid = 0; 1007 u32 gen; 1008 unsigned long ttl; 1009 struct ceph_mds_session *session = NULL; 1010 struct inode *dir = NULL; 1011 u32 seq = 0; 1012 1013 spin_lock(&dentry->d_lock); 1014 di = ceph_dentry(dentry); 1015 if (di->lease_session) { 1016 s = di->lease_session; 1017 spin_lock(&s->s_gen_ttl_lock); 1018 gen = s->s_cap_gen; 1019 ttl = s->s_cap_ttl; 1020 spin_unlock(&s->s_gen_ttl_lock); 1021 1022 if (di->lease_gen == gen && 1023 time_before(jiffies, dentry->d_time) && 1024 time_before(jiffies, ttl)) { 1025 valid = 1; 1026 if (di->lease_renew_after && 1027 time_after(jiffies, di->lease_renew_after)) { 1028 /* we should renew */ 1029 dir = dentry->d_parent->d_inode; 1030 session = ceph_get_mds_session(s); 1031 seq = di->lease_seq; 1032 di->lease_renew_after = 0; 1033 di->lease_renew_from = jiffies; 1034 } 1035 } 1036 } 1037 spin_unlock(&dentry->d_lock); 1038 1039 if (session) { 1040 ceph_mdsc_lease_send_msg(session, dir, dentry, 1041 CEPH_MDS_LEASE_RENEW, seq); 1042 ceph_put_mds_session(session); 1043 } 1044 dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid); 1045 return valid; 1046 } 1047 1048 /* 1049 * Check if directory-wide content lease/cap is valid. 1050 */ 1051 static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry) 1052 { 1053 struct ceph_inode_info *ci = ceph_inode(dir); 1054 struct ceph_dentry_info *di = ceph_dentry(dentry); 1055 int valid = 0; 1056 1057 spin_lock(&ci->i_ceph_lock); 1058 if (ci->i_shared_gen == di->lease_shared_gen) 1059 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1); 1060 spin_unlock(&ci->i_ceph_lock); 1061 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n", 1062 dir, (unsigned)ci->i_shared_gen, dentry, 1063 (unsigned)di->lease_shared_gen, valid); 1064 return valid; 1065 } 1066 1067 /* 1068 * Check if cached dentry can be trusted. 1069 */ 1070 static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) 1071 { 1072 int valid = 0; 1073 struct inode *dir; 1074 1075 if (flags & LOOKUP_RCU) 1076 return -ECHILD; 1077 1078 dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry, 1079 dentry, dentry->d_inode, ceph_dentry(dentry)->offset); 1080 1081 dir = ceph_get_dentry_parent_inode(dentry); 1082 1083 /* always trust cached snapped dentries, snapdir dentry */ 1084 if (ceph_snap(dir) != CEPH_NOSNAP) { 1085 dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry, 1086 dentry, dentry->d_inode); 1087 valid = 1; 1088 } else if (dentry->d_inode && 1089 ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) { 1090 valid = 1; 1091 } else if (dentry_lease_is_valid(dentry) || 1092 dir_lease_is_valid(dir, dentry)) { 1093 if (dentry->d_inode) 1094 valid = ceph_is_any_caps(dentry->d_inode); 1095 else 1096 valid = 1; 1097 } 1098 1099 dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid"); 1100 if (valid) { 1101 ceph_dentry_lru_touch(dentry); 1102 } else { 1103 ceph_dir_clear_complete(dir); 1104 } 1105 iput(dir); 1106 return valid; 1107 } 1108 1109 /* 1110 * Release our ceph_dentry_info. 1111 */ 1112 static void ceph_d_release(struct dentry *dentry) 1113 { 1114 struct ceph_dentry_info *di = ceph_dentry(dentry); 1115 1116 dout("d_release %p\n", dentry); 1117 ceph_dentry_lru_del(dentry); 1118 if (di->lease_session) 1119 ceph_put_mds_session(di->lease_session); 1120 kmem_cache_free(ceph_dentry_cachep, di); 1121 dentry->d_fsdata = NULL; 1122 } 1123 1124 static int ceph_snapdir_d_revalidate(struct dentry *dentry, 1125 unsigned int flags) 1126 { 1127 /* 1128 * Eventually, we'll want to revalidate snapped metadata 1129 * too... probably... 1130 */ 1131 return 1; 1132 } 1133 1134 /* 1135 * When the VFS prunes a dentry from the cache, we need to clear the 1136 * complete flag on the parent directory. 1137 * 1138 * Called under dentry->d_lock. 1139 */ 1140 static void ceph_d_prune(struct dentry *dentry) 1141 { 1142 dout("ceph_d_prune %p\n", dentry); 1143 1144 /* do we have a valid parent? */ 1145 if (IS_ROOT(dentry)) 1146 return; 1147 1148 /* if we are not hashed, we don't affect dir's completeness */ 1149 if (d_unhashed(dentry)) 1150 return; 1151 1152 /* 1153 * we hold d_lock, so d_parent is stable, and d_fsdata is never 1154 * cleared until d_release 1155 */ 1156 ceph_dir_clear_complete(dentry->d_parent->d_inode); 1157 } 1158 1159 /* 1160 * read() on a dir. This weird interface hack only works if mounted 1161 * with '-o dirstat'. 1162 */ 1163 static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, 1164 loff_t *ppos) 1165 { 1166 struct ceph_file_info *cf = file->private_data; 1167 struct inode *inode = file_inode(file); 1168 struct ceph_inode_info *ci = ceph_inode(inode); 1169 int left; 1170 const int bufsize = 1024; 1171 1172 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT)) 1173 return -EISDIR; 1174 1175 if (!cf->dir_info) { 1176 cf->dir_info = kmalloc(bufsize, GFP_NOFS); 1177 if (!cf->dir_info) 1178 return -ENOMEM; 1179 cf->dir_info_len = 1180 snprintf(cf->dir_info, bufsize, 1181 "entries: %20lld\n" 1182 " files: %20lld\n" 1183 " subdirs: %20lld\n" 1184 "rentries: %20lld\n" 1185 " rfiles: %20lld\n" 1186 " rsubdirs: %20lld\n" 1187 "rbytes: %20lld\n" 1188 "rctime: %10ld.%09ld\n", 1189 ci->i_files + ci->i_subdirs, 1190 ci->i_files, 1191 ci->i_subdirs, 1192 ci->i_rfiles + ci->i_rsubdirs, 1193 ci->i_rfiles, 1194 ci->i_rsubdirs, 1195 ci->i_rbytes, 1196 (long)ci->i_rctime.tv_sec, 1197 (long)ci->i_rctime.tv_nsec); 1198 } 1199 1200 if (*ppos >= cf->dir_info_len) 1201 return 0; 1202 size = min_t(unsigned, size, cf->dir_info_len-*ppos); 1203 left = copy_to_user(buf, cf->dir_info + *ppos, size); 1204 if (left == size) 1205 return -EFAULT; 1206 *ppos += (size - left); 1207 return size - left; 1208 } 1209 1210 /* 1211 * an fsync() on a dir will wait for any uncommitted directory 1212 * operations to commit. 1213 */ 1214 static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end, 1215 int datasync) 1216 { 1217 struct inode *inode = file_inode(file); 1218 struct ceph_inode_info *ci = ceph_inode(inode); 1219 struct list_head *head = &ci->i_unsafe_dirops; 1220 struct ceph_mds_request *req; 1221 u64 last_tid; 1222 int ret = 0; 1223 1224 dout("dir_fsync %p\n", inode); 1225 ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 1226 if (ret) 1227 return ret; 1228 mutex_lock(&inode->i_mutex); 1229 1230 spin_lock(&ci->i_unsafe_lock); 1231 if (list_empty(head)) 1232 goto out; 1233 1234 req = list_entry(head->prev, 1235 struct ceph_mds_request, r_unsafe_dir_item); 1236 last_tid = req->r_tid; 1237 1238 do { 1239 ceph_mdsc_get_request(req); 1240 spin_unlock(&ci->i_unsafe_lock); 1241 1242 dout("dir_fsync %p wait on tid %llu (until %llu)\n", 1243 inode, req->r_tid, last_tid); 1244 if (req->r_timeout) { 1245 ret = wait_for_completion_timeout( 1246 &req->r_safe_completion, req->r_timeout); 1247 if (ret > 0) 1248 ret = 0; 1249 else if (ret == 0) 1250 ret = -EIO; /* timed out */ 1251 } else { 1252 wait_for_completion(&req->r_safe_completion); 1253 } 1254 ceph_mdsc_put_request(req); 1255 1256 spin_lock(&ci->i_unsafe_lock); 1257 if (ret || list_empty(head)) 1258 break; 1259 req = list_entry(head->next, 1260 struct ceph_mds_request, r_unsafe_dir_item); 1261 } while (req->r_tid < last_tid); 1262 out: 1263 spin_unlock(&ci->i_unsafe_lock); 1264 mutex_unlock(&inode->i_mutex); 1265 1266 return ret; 1267 } 1268 1269 /* 1270 * We maintain a private dentry LRU. 1271 * 1272 * FIXME: this needs to be changed to a per-mds lru to be useful. 1273 */ 1274 void ceph_dentry_lru_add(struct dentry *dn) 1275 { 1276 struct ceph_dentry_info *di = ceph_dentry(dn); 1277 struct ceph_mds_client *mdsc; 1278 1279 dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn); 1280 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; 1281 spin_lock(&mdsc->dentry_lru_lock); 1282 list_add_tail(&di->lru, &mdsc->dentry_lru); 1283 mdsc->num_dentry++; 1284 spin_unlock(&mdsc->dentry_lru_lock); 1285 } 1286 1287 void ceph_dentry_lru_touch(struct dentry *dn) 1288 { 1289 struct ceph_dentry_info *di = ceph_dentry(dn); 1290 struct ceph_mds_client *mdsc; 1291 1292 dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn, 1293 di->offset); 1294 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; 1295 spin_lock(&mdsc->dentry_lru_lock); 1296 list_move_tail(&di->lru, &mdsc->dentry_lru); 1297 spin_unlock(&mdsc->dentry_lru_lock); 1298 } 1299 1300 void ceph_dentry_lru_del(struct dentry *dn) 1301 { 1302 struct ceph_dentry_info *di = ceph_dentry(dn); 1303 struct ceph_mds_client *mdsc; 1304 1305 dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn); 1306 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; 1307 spin_lock(&mdsc->dentry_lru_lock); 1308 list_del_init(&di->lru); 1309 mdsc->num_dentry--; 1310 spin_unlock(&mdsc->dentry_lru_lock); 1311 } 1312 1313 /* 1314 * Return name hash for a given dentry. This is dependent on 1315 * the parent directory's hash function. 1316 */ 1317 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) 1318 { 1319 struct ceph_inode_info *dci = ceph_inode(dir); 1320 1321 switch (dci->i_dir_layout.dl_dir_hash) { 1322 case 0: /* for backward compat */ 1323 case CEPH_STR_HASH_LINUX: 1324 return dn->d_name.hash; 1325 1326 default: 1327 return ceph_str_hash(dci->i_dir_layout.dl_dir_hash, 1328 dn->d_name.name, dn->d_name.len); 1329 } 1330 } 1331 1332 const struct file_operations ceph_dir_fops = { 1333 .read = ceph_read_dir, 1334 .iterate = ceph_readdir, 1335 .llseek = ceph_dir_llseek, 1336 .open = ceph_open, 1337 .release = ceph_release, 1338 .unlocked_ioctl = ceph_ioctl, 1339 .fsync = ceph_dir_fsync, 1340 }; 1341 1342 const struct file_operations ceph_snapdir_fops = { 1343 .iterate = ceph_readdir, 1344 .llseek = ceph_dir_llseek, 1345 .open = ceph_open, 1346 .release = ceph_release, 1347 }; 1348 1349 const struct inode_operations ceph_dir_iops = { 1350 .lookup = ceph_lookup, 1351 .permission = ceph_permission, 1352 .getattr = ceph_getattr, 1353 .setattr = ceph_setattr, 1354 .setxattr = ceph_setxattr, 1355 .getxattr = ceph_getxattr, 1356 .listxattr = ceph_listxattr, 1357 .removexattr = ceph_removexattr, 1358 .get_acl = ceph_get_acl, 1359 .set_acl = ceph_set_acl, 1360 .mknod = ceph_mknod, 1361 .symlink = ceph_symlink, 1362 .mkdir = ceph_mkdir, 1363 .link = ceph_link, 1364 .unlink = ceph_unlink, 1365 .rmdir = ceph_unlink, 1366 .rename = ceph_rename, 1367 .create = ceph_create, 1368 .atomic_open = ceph_atomic_open, 1369 }; 1370 1371 const struct inode_operations ceph_snapdir_iops = { 1372 .lookup = ceph_lookup, 1373 .permission = ceph_permission, 1374 .getattr = ceph_getattr, 1375 .mkdir = ceph_mkdir, 1376 .rmdir = ceph_unlink, 1377 }; 1378 1379 const struct dentry_operations ceph_dentry_ops = { 1380 .d_revalidate = ceph_d_revalidate, 1381 .d_release = ceph_d_release, 1382 .d_prune = ceph_d_prune, 1383 }; 1384 1385 const struct dentry_operations ceph_snapdir_dentry_ops = { 1386 .d_revalidate = ceph_snapdir_d_revalidate, 1387 .d_release = ceph_d_release, 1388 }; 1389 1390 const struct dentry_operations ceph_snap_dentry_ops = { 1391 .d_release = ceph_d_release, 1392 .d_prune = ceph_d_prune, 1393 }; 1394