#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * D_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
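 *
 * (Note added for illustration: f_pos uses the frag/off encoding
 * above, i.e. f_pos = ((loff_t)frag << 32) | off; ceph_make_fpos(),
 * used below, builds it and fpos_frag()/fpos_off() take it apart.
 * Each cached dentry records its readdir position in
 * ceph_dentry(dentry)->offset, so the scan below resumes from
 * filp->f_pos by walking d_subdirs backwards until it reaches a
 * hashed dentry with filp->f_pos <= di->offset.)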
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || last == NULL ||
	    filp->f_pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_test_complete(dir)) {
		dout(" lost D_COMPLETE on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
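 *
 * (Note added for illustration: ceph_readdir() below passes this name
 * to the MDS as req->r_path2, so the next READDIR chunk resumes just
 * after that entry even if the directory changed in the meantime.)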
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		ino_t ino = parent_ino(filp->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    ceph_dir_test_complete(inode) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			fi->dir_release_count--;    /* preclude D_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags?
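	 *
	 * (note added for clarity: advancing to the next frag restarts
	 * the within-frag offset at 0, so f_pos becomes
	 * ceph_make_fpos(frag, 0) for the new frag)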
	 */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		ceph_dir_set_complete(inode);
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;	/* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
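 *
 * (Summary added for clarity: on a traceless -ENOENT, the dentry is
 * d_add()ed with a NULL inode to cache the negative result if it has
 * no inode attached, or d_drop()ed with -ENOENT returned if it does.)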
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_dir_test_complete(dir) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}

/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
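 *
 * (Note added for illustration: "more than half up" is tracked via
 * di->lease_renew_after; once jiffies passes it, the code below sends
 * CEPH_MDS_LEASE_RENEW to the lease's MDS session and zeroes the
 * field so only one renewal is issued per lease.)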
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid)
		ceph_dentry_lru_touch(dentry);
	else
		d_drop(dentry);
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * Set/clear/test dir complete flag on the dir's dentry.
 */
void ceph_dir_set_complete(struct inode *inode)
{
	struct dentry *dentry = d_find_any_alias(inode);

	if (dentry && ceph_dentry(dentry) &&
	    ceph_test_mount_opt(ceph_sb_to_client(dentry->d_sb), DCACHE)) {
		dout(" marking %p (%p) complete\n", inode, dentry);
		set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
	}
	dput(dentry);
}

void ceph_dir_clear_complete(struct inode *inode)
{
	struct dentry *dentry = d_find_any_alias(inode);

	if (dentry && ceph_dentry(dentry)) {
		dout(" marking %p (%p) NOT complete\n", inode, dentry);
		clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
	}
	dput(dentry);
}

bool ceph_dir_test_complete(struct inode *inode)
{
	struct dentry *dentry = d_find_any_alias(inode);
	bool ret = false;

	if (dentry && ceph_dentry(dentry))
		ret = test_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
	dput(dentry);
	return ret;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect D_COMPLETE */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	di = ceph_dentry(dentry->d_parent);
	clear_bit(CEPH_D_COMPLETE, &di->flags);
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
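 *
 * (Hypothetical usage sketch, not from the original comment: on a
 * mount with "-o dirstat", something like "cat /mnt/ceph/mydir"
 * returns the report built below, e.g.
 *
 *     entries:                   10
 *      files:                     7
 *      subdirs:                   3
 *     ...
 *
 * where the r* lines give recursive totals for the subtree.)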
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};