#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
        struct ceph_dentry_info *di;

        if (dentry->d_fsdata)
                return 0;

        di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
        if (!di)
                return -ENOMEM;          /* oh well */

        spin_lock(&dentry->d_lock);
        if (dentry->d_fsdata) {
                /* lost a race */
                kmem_cache_free(ceph_dentry_cachep, di);
                goto out_unlock;
        }

        if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
                d_set_d_op(dentry, &ceph_dentry_ops);
        else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
                d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
        else
                d_set_d_op(dentry, &ceph_snap_dentry_ops);

        di->dentry = dentry;
        di->lease_session = NULL;
        dentry->d_time = jiffies;
        /* avoid reordering d_fsdata setup so that the check above is safe */
        smp_mb();
        dentry->d_fsdata = di;
        ceph_dentry_lru_add(dentry);
out_unlock:
        spin_unlock(&dentry->d_lock);
        return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
        struct inode *inode = NULL;

        if (!dentry)
                return NULL;

        spin_lock(&dentry->d_lock);
        if (!IS_ROOT(dentry)) {
                inode = dentry->d_parent->d_inode;
                ihold(inode);
        }
        spin_unlock(&dentry->d_lock);
        return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
        return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
        return p & 0xffffffff;
}

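/*
 * A quick worked example of the encoding (a sketch; ceph_make_fpos()
 * in super.h is assumed to compose the two halves the obvious way):
 *
 *     loff_t pos = ceph_make_fpos(0x2, 5);    frag 0x2, entry offset 5
 *     fpos_frag(pos) == 0x2;                  upper 32 bits
 *     fpos_off(pos)  == 5;                    lower 32 bits
 *
 * f_pos values 0 and 1 are reserved for "." and ".." (see
 * ceph_readdir() below); real entries start at offset 2.
 */
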
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
                            void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct dentry *parent = filp->f_dentry;
        struct inode *dir = parent->d_inode;
        struct list_head *p;
        struct dentry *dentry, *last;
        struct ceph_dentry_info *di;
        int err = 0;

        /* claim ref on last dentry we returned */
        last = fi->dentry;
        fi->dentry = NULL;

        dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
             last);

        spin_lock(&parent->d_lock);

        /* start at beginning? */
        if (filp->f_pos == 2 || last == NULL ||
            filp->f_pos < ceph_dentry(last)->offset) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
                dout(" initial p %p/%p\n", p->prev, p->next);
        } else {
                p = last->d_u.d_child.prev;
        }

more:
        dentry = list_entry(p, struct dentry, d_u.d_child);
        di = ceph_dentry(dentry);
        while (1) {
                dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
                     d_unhashed(dentry) ? "!hashed" : "hashed",
                     parent->d_subdirs.prev, parent->d_subdirs.next);
                if (p == &parent->d_subdirs) {
                        fi->flags |= CEPH_F_ATEND;
                        goto out_unlock;
                }
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
                if (!d_unhashed(dentry) && dentry->d_inode &&
                    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
                    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
                    filp->f_pos <= di->offset)
                        break;
                dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, di->offset,
                     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
                     !dentry->d_inode ? " null" : "");
                spin_unlock(&dentry->d_lock);
                p = p->prev;
                dentry = list_entry(p, struct dentry, d_u.d_child);
                di = ceph_dentry(dentry);
        }

        dget_dlock(dentry);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);

        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
        filp->f_pos = di->offset;
        err = filldir(dirent, dentry->d_name.name,
                      dentry->d_name.len, di->offset,
                      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
                      dentry->d_inode->i_mode >> 12);

        if (last) {
                if (err < 0) {
                        /* remember our position */
                        fi->dentry = last;
                        fi->next_offset = di->offset;
                } else {
                        dput(last);
                }
        }
        last = dentry;

        if (err < 0)
                goto out;

        filp->f_pos++;

        /* make sure a dentry wasn't dropped while we didn't have parent lock */
        if (!ceph_dir_is_complete(dir)) {
                dout(" lost dir complete on %p; falling back to mds\n", dir);
                err = -EAGAIN;
                goto out;
        }

        spin_lock(&parent->d_lock);
        p = p->prev;    /* advance to next dentry */
        goto more;

out_unlock:
        spin_unlock(&parent->d_lock);
out:
        if (last)
                dput(last);
        return err;
}

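/*
 * Note that __dcache_readdir() returns -EAGAIN when it discovers the
 * dir is no longer complete; ceph_readdir() treats that as "fall back
 * to a normal MDS readdir", not as an error for the caller.
 */
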
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
                            int len)
{
        kfree(fi->last_name);
        fi->last_name = kmalloc(len+1, GFP_NOFS);
        if (!fi->last_name)
                return -ENOMEM;
        memcpy(fi->last_name, name, len);
        fi->last_name[len] = 0;
        dout("note_last_dentry '%s'\n", fi->last_name);
        return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned frag = fpos_frag(filp->f_pos);
        int off = fpos_off(filp->f_pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
        const int max_entries = fsc->mount_options->max_readdir;
        const int max_bytes = fsc->mount_options->max_readdir_bytes;

        dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
        if (fi->flags & CEPH_F_ATEND)
                return 0;

        /* always start with . and .. */
        if (filp->f_pos == 0) {
                /* note dir version at start of readdir so we can tell
                 * if any dentries get dropped */
                fi->dir_release_count = atomic_read(&ci->i_release_count);

                dout("readdir off 0 -> '.'\n");
                if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
                            ceph_translate_ino(inode->i_sb, inode->i_ino),
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 1;
                off = 1;
        }
        if (filp->f_pos == 1) {
                ino_t ino = parent_ino(filp->f_dentry);
                dout("readdir off 1 -> '..'\n");
                if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
                            ceph_translate_ino(inode->i_sb, ino),
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 2;
                off = 2;
        }

        /* can we use the dcache? */
        spin_lock(&ci->i_ceph_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            __ceph_dir_is_complete(ci) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                spin_unlock(&ci->i_ceph_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
                spin_unlock(&ci->i_ceph_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
                                       fi->dentry->d_name.len);
                if (err)
                        return err;
                dput(fi->dentry);
                fi->dentry = NULL;
        }

        /* proceed with a normal readdir */

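        /*
         * Each pass through the loop below fetches (at most) one chunk
         * of one frag from the MDS and feeds it to filldir;
         * fi->last_readdir caches the parsed reply so a later ->readdir
         * call can resume from the same chunk without another round
         * trip.
         */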
more:
        /* do we have the correct frag content buffered? */
        if (fi->frag != frag || fi->last_readdir == NULL) {
                struct ceph_mds_request *req;
                int op = ceph_snap(inode) == CEPH_SNAPDIR ?
                        CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

                /* discard old result, if any */
                if (fi->last_readdir) {
                        ceph_mdsc_put_request(fi->last_readdir);
                        fi->last_readdir = NULL;
                }

                /* requery frag tree, as the frag topology may have changed */
                frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

                dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
                     ceph_vinop(inode), frag, fi->last_name);
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->r_inode = inode;
                ihold(inode);
                req->r_dentry = dget(filp->f_dentry);
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
                req->r_direct_hash = ceph_frag_value(frag);
                req->r_direct_is_hash = true;
                req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
                req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
                req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
                req->r_num_caps = max_entries + 1;
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err < 0) {
                        ceph_mdsc_put_request(req);
                        return err;
                }
                dout("readdir got and parsed readdir result=%d"
                     " on frag %x, end=%d, complete=%d\n", err, frag,
                     (int)req->r_reply_info.dir_end,
                     (int)req->r_reply_info.dir_complete);

                if (!req->r_did_prepopulate) {
                        dout("readdir !did_prepopulate");
                        /* preclude from marking dir complete */
                        fi->dir_release_count--;
                }

                /* note next offset and last dentry name */
                fi->offset = fi->next_offset;
                fi->last_readdir = req;

                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
                        fi->last_name = NULL;
                        if (ceph_frag_is_rightmost(frag))
                                fi->next_offset = 2;
                        else
                                fi->next_offset = 0;
                } else {
                        rinfo = &req->r_reply_info;
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
                                       rinfo->dir_dname_len[rinfo->dir_nr-1]);
                        if (err)
                                return err;
                        fi->next_offset += rinfo->dir_nr;
                }
        }

        rinfo = &fi->last_readdir->r_reply_info;
        dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
             rinfo->dir_nr, off, fi->offset);
        while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
                u64 pos = ceph_make_fpos(frag, off);
                struct ceph_mds_reply_inode *in =
                        rinfo->dir_in[off - fi->offset].in;
                struct ceph_vino vino;
                ino_t ino;

                dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
                     off, off - fi->offset, rinfo->dir_nr, pos,
                     rinfo->dir_dname_len[off - fi->offset],
                     rinfo->dir_dname[off - fi->offset], in);
                BUG_ON(!in);
                ftype = le32_to_cpu(in->mode) >> 12;
                vino.ino = le64_to_cpu(in->ino);
                vino.snap = le64_to_cpu(in->snapid);
                ino = ceph_vino_to_ino(vino);
                if (filldir(dirent,
                            rinfo->dir_dname[off - fi->offset],
                            rinfo->dir_dname_len[off - fi->offset],
                            pos,
                            ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
                        dout("filldir stopping us...\n");
                        return 0;
                }
                off++;
                filp->f_pos = pos + 1;
        }

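        /*
         * If the reply did not mark the end of this frag, fi->last_name
         * still holds the name of the last entry we saw; loop back and
         * ask the MDS for the next chunk of the same frag, starting
         * just after that name.
         */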
        if (fi->last_name) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
                goto more;
        }

        /* more frags? */
        if (!ceph_frag_is_rightmost(frag)) {
                frag = ceph_frag_next(frag);
                off = 0;
                filp->f_pos = ceph_make_fpos(frag, off);
                dout("readdir next frag is %x\n", frag);
                goto more;
        }
        fi->flags |= CEPH_F_ATEND;

        /*
         * if dir_release_count still matches the dir, no dentries
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
        spin_lock(&ci->i_ceph_lock);
        if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                __ceph_dir_set_complete(ci, fi->dir_release_count);
                ci->i_max_offset = filp->f_pos;
        }
        spin_unlock(&ci->i_ceph_lock);

        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
        if (fi->last_readdir) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
        }
        kfree(fi->last_name);
        fi->last_name = NULL;
        fi->next_offset = 2;  /* compensate for . and .. */
        if (fi->dentry) {
                dput(fi->dentry);
                fi->dentry = NULL;
        }
        fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        loff_t old_offset = offset;
        loff_t retval;

        mutex_lock(&inode->i_mutex);
        retval = -EINVAL;
        switch (whence) {
        case SEEK_END:
                offset += inode->i_size + 2;   /* FIXME */
                break;
        case SEEK_CUR:
                offset += file->f_pos;
        case SEEK_SET:
                break;
        default:
                goto out;
        }

        if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
                        fi->flags &= ~CEPH_F_ATEND;
                }
                retval = offset;

                /*
                 * discard buffered readdir content on seekdir(0), or
                 * seek to new frag, or seek prior to current chunk.
                 */
                if (offset == 0 ||
                    fpos_frag(offset) != fpos_frag(old_offset) ||
                    fpos_off(offset) < fi->offset) {
                        dout("dir_llseek dropping %p content\n", file);
                        reset_readdir(fi);
                }

                /* bump dir_release_count if we did a forward seek */
                if (offset > old_offset)
                        fi->dir_release_count--;
        }
out:
        mutex_unlock(&inode->i_mutex);
        return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
                        struct dentry *dentry, int err)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

        /* .snap dir? */
        if (err == -ENOENT &&
            ceph_snap(parent) == CEPH_NOSNAP &&
            strcmp(dentry->d_name.name,
                   fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
                     dentry, dentry->d_name.len, dentry->d_name.name, inode);
                BUG_ON(!d_unhashed(dentry));
                d_add(dentry, inode);
                err = 0;
        }
        return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
{
        if (err == -ENOENT) {
                /* no trace? */
                err = 0;
                if (!req->r_reply_info.head->is_dentry) {
                        dout("ENOENT and no trace, dentry %p inode %p\n",
                             dentry, dentry->d_inode);
                        if (dentry->d_inode) {
                                d_drop(dentry);
                                err = -ENOENT;
                        } else {
                                d_add(dentry, NULL);
                        }
                }
        }
        if (err)
                dentry = ERR_PTR(err);
        else if (dentry != req->r_dentry)
                dentry = dget(req->r_dentry);   /* we got spliced */
        else
                dentry = NULL;
        return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
        return ceph_ino(inode) == CEPH_INO_ROOT &&
                strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  unsigned int flags)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int op;
        int err;

        dout("lookup %p dentry %p '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return ERR_PTR(err);

        /* can we conclude ENOENT locally? */
        if (dentry->d_inode == NULL) {
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);

                spin_lock(&ci->i_ceph_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
                            dentry->d_name.len) &&
                    !is_root_ceph_dentry(dir, dentry) &&
                    __ceph_dir_is_complete(ci) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
                        spin_unlock(&ci->i_ceph_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
                spin_unlock(&ci->i_ceph_lock);
        }

        op = ceph_snap(dir) == CEPH_SNAPDIR ?
                CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
        req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        /* we only need inode linkage */
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_locked_dir = dir;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        err = ceph_handle_snapdir(req, dentry, err);
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);  /* will dput(dentry) */
        dout("lookup result=%p\n", dentry);
        return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
        struct dentry *result = ceph_lookup(dir, dentry, 0);

        if (result && !IS_ERR(result)) {
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
                 * had in our cache (and thus got spliced).  Link our
                 * dentry to that inode, but don't hash it, just in
                 * case the VFS wants to dereference it.
                 */
                BUG_ON(!result->d_inode);
                d_instantiate(dentry, result->d_inode);
                return 0;
        }
        return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
                      umode_t mode, dev_t rdev)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
             dir, dentry, mode, rdev);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                       bool excl)
{
        return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                        const char *dest)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_path2 = kstrdup(dest, GFP_NOFS);
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* mkdir .snap/foo is a MKSNAP */
                op = CEPH_MDS_OP_MKSNAP;
                dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
                     dentry->d_name.len, dentry->d_name.name, dentry);
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
                op = CEPH_MDS_OP_MKDIR;
        } else {
                goto out;
        }
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mkdir.mode = cpu_to_le32(mode);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (err < 0)
                d_drop(dentry);
        return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("link in dir %p old_dentry %p dentry %p\n", dir,
             old_dentry, dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
        req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (err) {
                d_drop(dentry);
        } else if (!req->r_reply_info.head->is_dentry) {
                ihold(old_dentry->d_inode);
                d_instantiate(dentry, old_dentry->d_inode);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

        spin_lock(&ci->i_ceph_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
        spin_unlock(&ci->i_ceph_lock);
        return drop;
}

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = dentry->d_inode;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* rmdir .snap/foo is RMSNAP */
                dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
                     dentry->d_name.name, dentry);
                op = CEPH_MDS_OP_RMSNAP;
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
                op = S_ISDIR(dentry->d_inode->i_mode) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_inode_drop = drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
        ceph_mdsc_put_request(req);
out:
        return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(old_dir) != ceph_snap(new_dir))
                return -EXDEV;
        if (ceph_snap(old_dir) != CEPH_NOSNAP ||
            ceph_snap(new_dir) != CEPH_NOSNAP)
                return -EROFS;
        dout("rename dir %p dentry %p to dir %p dentry %p\n",
             old_dir, old_dentry, new_dir, new_dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_dentry = dget(new_dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry);
        req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
        req->r_locked_dir = new_dir;
        req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
        if (new_dentry->d_inode)
                req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
                 * Normally d_move() is done by fill_trace (called by
                 * do_request, above).  If there is no trace, we need
                 * to do it here.
                 */

                /* d_move screws up d_subdirs order */
                ceph_dir_clear_complete(new_dir);

                d_move(old_dentry, new_dentry);

                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        dentry->d_time = jiffies;
        ceph_dentry(dentry)->lease_shared_gen = 0;
        spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
        struct ceph_dentry_info *di;
        struct ceph_mds_session *s;
        int valid = 0;
        u32 gen;
        unsigned long ttl;
        struct ceph_mds_session *session = NULL;
        struct inode *dir = NULL;
        u32 seq = 0;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        if (di->lease_session) {
                s = di->lease_session;
                spin_lock(&s->s_gen_ttl_lock);
                gen = s->s_cap_gen;
                ttl = s->s_cap_ttl;
                spin_unlock(&s->s_gen_ttl_lock);

                if (di->lease_gen == gen &&
                    time_before(jiffies, dentry->d_time) &&
                    time_before(jiffies, ttl)) {
                        valid = 1;
                        if (di->lease_renew_after &&
                            time_after(jiffies, di->lease_renew_after)) {
                                /* we should renew */
                                dir = dentry->d_parent->d_inode;
                                session = ceph_get_mds_session(s);
                                seq = di->lease_seq;
                                di->lease_renew_after = 0;
                                di->lease_renew_from = jiffies;
                        }
                }
        }
        spin_unlock(&dentry->d_lock);

        if (session) {
                ceph_mdsc_lease_send_msg(session, dir, dentry,
                                         CEPH_MDS_LEASE_RENEW, seq);
                ceph_put_mds_session(session);
        }
        dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
        return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
        spin_unlock(&ci->i_ceph_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
        return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
        int valid = 0;
        struct inode *dir;

        if (flags & LOOKUP_RCU)
                return -ECHILD;

        dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
             dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
             ceph_dentry(dentry)->offset);

        dir = ceph_get_dentry_parent_inode(dentry);

        /* always trust cached snapped dentries, snapdir dentry */
        if (ceph_snap(dir) != CEPH_NOSNAP) {
                dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
                valid = 1;
        } else if (dentry->d_inode &&
                   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
                valid = 1;
        } else if (dentry_lease_is_valid(dentry) ||
                   dir_lease_is_valid(dir, dentry)) {
                valid = 1;
        }

        dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
        if (valid)
                ceph_dentry_lru_touch(dentry);
        else
                d_drop(dentry);
        iput(dir);
        return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);

        dout("d_release %p\n", dentry);
        ceph_dentry_lru_del(dentry);
        if (di->lease_session)
                ceph_put_mds_session(di->lease_session);
        kmem_cache_free(ceph_dentry_cachep, di);
        dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
                                     unsigned int flags)
{
        /*
         * Eventually, we'll want to revalidate snapped metadata
         * too... probably...
         */
        return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
        dout("ceph_d_prune %p\n", dentry);

        /* do we have a valid parent? */
        if (IS_ROOT(dentry))
                return;

        /* if we are not hashed, we don't affect dir's completeness */
        if (d_unhashed(dentry))
                return;

        /*
         * we hold d_lock, so d_parent is stable, and d_fsdata is never
         * cleared until d_release
         */
        ceph_dir_clear_complete(dentry->d_parent->d_inode);
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
                             loff_t *ppos)
{
        struct ceph_file_info *cf = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int left;
        const int bufsize = 1024;

        if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
                return -EISDIR;

        if (!cf->dir_info) {
                cf->dir_info = kmalloc(bufsize, GFP_NOFS);
                if (!cf->dir_info)
                        return -ENOMEM;
                cf->dir_info_len =
                        snprintf(cf->dir_info, bufsize,
                                 "entries: %20lld\n"
                                 " files: %20lld\n"
                                 " subdirs: %20lld\n"
                                 "rentries: %20lld\n"
                                 " rfiles: %20lld\n"
                                 " rsubdirs: %20lld\n"
                                 "rbytes: %20lld\n"
                                 "rctime: %10ld.%09ld\n",
                                 ci->i_files + ci->i_subdirs,
                                 ci->i_files,
                                 ci->i_subdirs,
                                 ci->i_rfiles + ci->i_rsubdirs,
                                 ci->i_rfiles,
                                 ci->i_rsubdirs,
                                 ci->i_rbytes,
                                 (long)ci->i_rctime.tv_sec,
                                 (long)ci->i_rctime.tv_nsec);
        }

        if (*ppos >= cf->dir_info_len)
                return 0;
        size = min_t(unsigned, size, cf->dir_info_len-*ppos);
        left = copy_to_user(buf, cf->dir_info + *ppos, size);
        if (left == size)
                return -EFAULT;
        *ppos += (size - left);
        return size - left;
}

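/*
 * Usage example (a sketch): with the filesystem mounted '-o dirstat',
 * a plain read(2) of a directory, e.g. `cat /mnt/ceph/some/dir`
 * (hypothetical path), returns the formatted rstat summary built
 * above instead of failing with -EISDIR.
 */
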
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
                          int datasync)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct list_head *head = &ci->i_unsafe_dirops;
        struct ceph_mds_request *req;
        u64 last_tid;
        int ret = 0;

        dout("dir_fsync %p\n", inode);
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (ret)
                return ret;
        mutex_lock(&inode->i_mutex);

        spin_lock(&ci->i_unsafe_lock);
        if (list_empty(head))
                goto out;

        req = list_entry(head->prev,
                         struct ceph_mds_request, r_unsafe_dir_item);
        last_tid = req->r_tid;

        do {
                ceph_mdsc_get_request(req);
                spin_unlock(&ci->i_unsafe_lock);

                dout("dir_fsync %p wait on tid %llu (until %llu)\n",
                     inode, req->r_tid, last_tid);
                if (req->r_timeout) {
                        ret = wait_for_completion_timeout(
                                &req->r_safe_completion, req->r_timeout);
                        if (ret > 0)
                                ret = 0;
                        else if (ret == 0)
                                ret = -EIO;  /* timed out */
                } else {
                        wait_for_completion(&req->r_safe_completion);
                }
                ceph_mdsc_put_request(req);

                spin_lock(&ci->i_unsafe_lock);
                if (ret || list_empty(head))
                        break;
                req = list_entry(head->next,
                                 struct ceph_mds_request, r_unsafe_dir_item);
        } while (req->r_tid < last_tid);
out:
        spin_unlock(&ci->i_unsafe_lock);
        mutex_unlock(&inode->i_mutex);

        return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_lru_lock);
        list_add_tail(&di->lru, &mdsc->dentry_lru);
        mdsc->num_dentry++;
        spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
             dn->d_name.len, dn->d_name.name, di->offset);
        mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_lru_lock);
        list_move_tail(&di->lru, &mdsc->dentry_lru);
        spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
        struct ceph_dentry_info *di = ceph_dentry(dn);
        struct ceph_mds_client *mdsc;

        dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
             dn->d_name.len, dn->d_name.name);
        mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_lru_lock);
        list_del_init(&di->lru);
        mdsc->num_dentry--;
        spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
        struct ceph_inode_info *dci = ceph_inode(dir);

        switch (dci->i_dir_layout.dl_dir_hash) {
        case 0: /* for backward compat */
        case CEPH_STR_HASH_LINUX:
                return dn->d_name.hash;

        default:
                return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
                                     dn->d_name.name, dn->d_name.len);
        }
}

const struct file_operations ceph_dir_fops = {
        .read = ceph_read_dir,
        .readdir = ceph_readdir,
        .llseek = ceph_dir_llseek,
        .open = ceph_open,
        .release = ceph_release,
        .unlocked_ioctl = ceph_ioctl,
        .fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
        .getattr = ceph_getattr,
        .setattr = ceph_setattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
        .mknod = ceph_mknod,
        .symlink = ceph_symlink,
        .mkdir = ceph_mkdir,
        .link = ceph_link,
        .unlink = ceph_unlink,
        .rmdir = ceph_unlink,
        .rename = ceph_rename,
        .create = ceph_create,
        .atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
        .d_revalidate = ceph_d_revalidate,
        .d_release = ceph_d_release,
        .d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
        .d_revalidate = ceph_snapdir_d_revalidate,
        .d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
        .d_release = ceph_d_release,
        .d_prune = ceph_d_prune,
};