#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).
 * The exception to this is open_root_dentry(), which will open the
 * mount point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	if (dentry->d_parent == NULL ||	/* nfs fh_to_dentry */
	    ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;		/* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}
	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_fsdata = di;
	dentry->d_time = jiffies;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
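/*
 * A worked example of that encoding (a sketch; ceph_make_fpos(), which
 * builds these values, lives outside this file and is assumed to be
 * ((loff_t)frag << 32) | off):
 *
 *	loff_t pos = ceph_make_fpos(0x2a, 7);
 *	fpos_frag(pos) == 0x2a		the dirfrag
 *	fpos_off(pos)  == 7		offset within that frag
 *
 * Offsets 0 and 1 are reserved for "." and ".." (see ceph_readdir()
 * below), so real entries within a frag start at offset 2.
 */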
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is defined
 * IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by the MDS
 * if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || last == NULL ||
	    filp->f_pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->at_end = 1;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
		dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
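/*
 * To recap the contract above: __dcache_readdir() either satisfies the
 * readdir entirely from the dcache, or returns -EAGAIN as soon as
 * I_COMPLETE is lost mid-scan; ceph_readdir() then notes the name of
 * the last dentry we handed out (below) and continues that chunk as a
 * normal MDS readdir.
 */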
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
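/*
 * The name saved here becomes req->r_path2 of the next READDIR request
 * (see ceph_readdir() below), i.e. a "resume after this name" cursor
 * that the MDS can honor even when the directory changes between
 * chunks.
 */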
static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->at_end)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		ino_t ino = filp->f_dentry->d_parent->d_inode->i_ino;
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&inode->i_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&inode->i_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&inode->i_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate\n");
			fi->dir_release_count--;	/* preclude I_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}
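	/*
	 * Dirfrag walk, in brief: a large directory may be fragmented
	 * by the MDS into disjoint frags covering its hash space.  We
	 * enumerate them in order; ceph_frag_next() is assumed to
	 * yield the next frag in that ordering, and reaching the
	 * rightmost frag (checked below) ends the readdir.
	 */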
	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->at_end = 1;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&inode->i_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&inode->i_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;	/* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;	/* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->at_end = 0;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* preclude I_COMPLETE if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}
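/*
 * A note on directory seek offsets: f_pos here is the compound
 * (frag, off) cookie built by ceph_make_fpos(), not a byte count, so
 * the SEEK_END arithmetic above is only approximate (hence the FIXME).
 * The conservative reset_readdir() on rewinds and frag changes keeps
 * fi's buffered chunk state consistent with whatever position results.
 */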
/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode;

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}

	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    (nd->flags & LOOKUP_CONTINUE) == 0 &&	/* only open last component */
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/*
	 * Can we conclude ENOENT locally?  If the dir is I_COMPLETE
	 * and we still hold CEPH_CAP_FILE_SHARED, our cached contents
	 * are authoritative, so a name we don't have cannot exist.
	 */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&dir->i_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&dir->i_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&dir->i_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);	/* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      int mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}
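/*
 * The r_dentry_drop/r_dentry_unless pair above recurs throughout this
 * file: as part of the operation we offer to release
 * CEPH_CAP_FILE_SHARED on the parent directory, unless we hold
 * CEPH_CAP_FILE_EXCL.  (The actual cap release is presumably encoded
 * into the request message by the MDS client code, outside this file.)
 */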
static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}
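/*
 * Usage sketch for the MKSNAP path above (paths illustrative; the
 * snapdir name defaults to ".snap" but is settable via the
 * snapdir_name mount option, cf. ceph_finish_lookup()):
 *
 *	mkdir /mnt/ceph/some/dir/.snap/mysnap
 *
 * arrives here as an ordinary mkdir in the virtual snap directory and
 * is sent to the MDS as CEPH_MDS_OP_MKSNAP instead of MKDIR.
 */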
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);	/* or inode? hrm. */
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the LINK caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&inode->i_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&inode->i_lock);
	return drop;
}
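/*
 * For illustration (a sketch of the intent, inferred from the code
 * above): unlinking a file with i_nlink == 1 that is still open for
 * read leaves __ceph_caps_wanted() containing the FILE read caps, so
 * the mask keeps only those wanted bits plus CEPH_CAP_PIN and drops
 * everything else; if nothing is wanted, everything but PIN goes.
 */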
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_i_clear(new_dir, CEPH_I_COMPLETE);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}
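/*
 * Both assignments above matter: setting d_time to "now" fails the
 * time_before(jiffies, dentry->d_time) test in dentry_lease_is_valid()
 * below, and clearing lease_shared_gen defeats the generation match in
 * dir_lease_is_valid() (assuming the directory's live i_shared_gen is
 * nonzero).
 */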
/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_cap_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_cap_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&dir->i_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&dir->i_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *dir;

	if (nd && nd->flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dentry->d_parent->d_inode;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		goto out_touch;
	}
	if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
		goto out_touch;

	if (dentry_lease_is_valid(dentry) ||
	    dir_lease_is_valid(dir, dentry))
		goto out_touch;

	dout("d_revalidate %p invalid\n", dentry);
	d_drop(dentry);
	return 0;
out_touch:
	ceph_dentry_lru_touch(dentry);
	return 1;
}
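/*
 * In other words, revalidation is two-tier: a dentry is trusted if
 * either its own MDS lease is still live (dentry_lease_is_valid()) or
 * the whole parent directory is covered by CEPH_CAP_FILE_SHARED with a
 * matching shared gen (dir_lease_is_valid()).  Snapped dentries never
 * change, so they are always trusted.
 */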
/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	if (di) {
		ceph_dentry_lru_del(dentry);
		if (di->lease_session)
			ceph_put_mds_session(di->lease_session);
		kmem_cache_free(ceph_dentry_cachep, di);
		dentry->d_fsdata = NULL;
	}
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}


/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
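/*
 * Example of the dirstat hack above (a sketch; mount syntax and paths
 * are illustrative):
 *
 *	# mount -t ceph mon-host:/ /mnt/ceph -o dirstat
 *	# cat /mnt/ceph/somedir
 *	entries:	...
 *	 files:		...
 *	 subdirs:	...
 *	rentries:	...
 *
 * i.e. read(2) on the directory returns the stats buffer formatted in
 * ceph_read_dir(); without -o dirstat it fails with -EISDIR as usual.
 */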
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;	/* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		spin_lock(&ci->i_unsafe_lock);
		ceph_mdsc_put_request(req);

		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_add_tail(&di->lru, &mdsc->dentry_lru);
		mdsc->num_dentry++;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_move_tail(&di->lru, &mdsc->dentry_lru);
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_del_init(&di->lru);
		mdsc->num_dentry--;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct dentry *dn)
{
	struct inode *dir = dn->d_parent->d_inode;
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
};