#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path. Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino). The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;		/* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache. We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir. It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
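 *
 * The walk below goes backwards over the parent's d_subdirs list,
 * matching ctx->pos against each dentry's di->offset; if we notice
 * the dir is no longer complete we bail out with -EAGAIN so the
 * caller can fall back to a sync readdir.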
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p v%u at %llu (last %p)\n",
	     dir, shared_gen, ctx->pos, last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (di->lease_shared_gen == shared_gen &&
		    !d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		dput(dentry);
		err = -EAGAIN;
		goto out;
	}

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = fpos_off(di->offset);
		}
		dput(dentry);
		return 0;
	}

	ctx->pos = di->offset + 1;

	if (last)
		dput(last);
	last = dentry;

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
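 *
 * The saved name is handed back to the MDS (via r_path2) on the next
 * CEPH_MDS_OP_READDIR request so that the listing resumes just after
 * this entry.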
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);

		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			      ceph_translate_ino(inode->i_sb, inode->i_ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			      ceph_translate_ino(inode->i_sb, ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			      rinfo->dir_dname[off - fi->offset],
			      rinfo->dir_dname_len[off - fi->offset],
			      ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count);
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;	/* compensate for . and .. */
	else
		fi->next_offset = 0;
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;	/* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		}

		/* bump dir_release_count if we did a forward seek */
		if (fpos_cmp(offset, old_offset) > 0)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
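 * (They can differ when fill_trace spliced the reply onto a dentry
 * that was already present in our cache.)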
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry. If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);	/* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
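 *
 * If the lookup finds an inode, we instantiate the original dentry
 * with it (but leave it unhashed); otherwise the lookup's error is
 * returned.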
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced). Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(dentry->d_inode, &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(dentry->d_inode, &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the LINK_SHARED and LINK_EXCL
 * caps. If it looks like the link count will hit 0, drop any other
 * caps (other than PIN) we don't specifically want (due to the file
 * still being open).
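 *
 * CEPH_I_NODELAY is also set in that case so that the resulting cap
 * release is sent to the MDS without the usual delay.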
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above). If there is no trace, we need
		 * to do it here.
		 */

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);

	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid. If not, delete the lease. Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
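 *
 * Returns 1 if the dentry can still be used, 0 if the caller should
 * drop it and do a fresh lookup; in the latter case we also clear the
 * parent directory's "complete" flag.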
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (dentry->d_inode)
			valid = ceph_is_any_caps(dentry->d_inode);
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
		d_drop(dentry);
	}
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}

/*
 * read() on a dir. This weird interface hack only works if mounted
 * with '-o dirstat'.
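 *
 * The read returns a short text report of the directory's entry
 * counts and recursive statistics (rentries, rfiles, rbytes, rctime).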
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;	/* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
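 *
 * Dentries are added in ceph_init_dentry(), touched when they
 * successfully revalidate, and removed in ceph_d_release().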
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry. This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};