#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;		/* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

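/*
 * A quick sketch of the encoding, assuming ceph_make_fpos() from
 * super.h as the inverse of the two helpers above: the frag occupies
 * the high 32 bits of f_pos and the intra-frag offset the low 32 bits.
 *
 *	loff_t pos = ceph_make_fpos(0xfa00, 7);
 *
 *	fpos_frag(pos);		returns 0xfa00  (high 32 bits)
 *	fpos_off(pos);		returns 7       (low 32 bits)
 */
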
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * only valid while we hold CEPH_CAP_FILE_SHARED (which will be revoked
 * by the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    ctx->pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    ctx->pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	ctx->pos = di->offset;
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		}
		dput(dentry);
		return 0;
	}

	if (last)
		dput(last);
	last = dentry;

	ctx->pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}

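/*
 * A sketch of the ordering the walk above depends on: the VFS links
 * new child dentries at the head of d_subdirs, so when a directory was
 * populated in MDS reply order (ceph_readdir_prepopulate()), the
 * lowest-offset entry sits at the tail.  Assuming cached entries with
 * offsets 2, 3 and 4:
 *
 *	d_subdirs <-> [off 4] <-> [off 3] <-> [off 2] <-> (back to head)
 *
 * starting at d_subdirs.prev and stepping p->prev yields 2, 3, 4,
 * i.e. ascending di->offset, matching readdir order.
 */
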
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);

		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			      ceph_translate_ino(inode->i_sb, inode->i_ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			      ceph_translate_ino(inode->i_sb, ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate\n");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			      rinfo->dir_dname[off - fi->offset],
			      rinfo->dir_dname_len[off - fi->offset],
			      ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count);
		ci->i_max_offset = ctx->pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

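/*
 * A sketch of the position space the walk above covers, in terms of
 * the fpos helpers at the top of this file: ctx->pos packs (frag,
 * off), "." and ".." occupy positions 0 and 1, and when one frag's
 * entries are exhausted the loop advances with
 *
 *	frag = ceph_frag_next(frag);
 *	ctx->pos = ceph_make_fpos(frag, 0);
 *
 * stopping once the rightmost frag (ceph_frag_is_rightmost()) has
 * been consumed.
 */
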
static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;	/* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;	/* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/*
		 * invalidate our snapshot of the dir release count on
		 * a forward seek, so a subsequent readdir won't wrongly
		 * mark the dir complete
		 */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

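/*
 * For example, with the default snapdir name (configurable via the
 * snapdir_name mount option), a lookup of "somedir/.snap" comes back
 * from the MDS as -ENOENT, and the check above substitutes the
 * synthetic snapdir inode from ceph_get_snapdir() instead of
 * returning the error to the VFS.
 */
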
/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);	/* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

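/*
 * The create-flavored ops below (mknod, create, symlink, mkdir, link)
 * all follow the same request pattern; roughly, with details elided:
 *
 *	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
 *	req->r_dentry = dget(dentry);
 *	req->r_locked_dir = dir;	(the VFS holds the dir's i_mutex)
 *	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
 *	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	if (!err && !req->r_reply_info.head->is_dentry)
 *		err = ceph_handle_notrace_create(dir, dentry);
 *
 * i.e. offer to drop our FILE_SHARED cap on the dir (the MDS will
 * invalidate it anyway), and fall back to a lookup when the reply
 * carries no trace.
 */
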
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode?  hrm. */
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the LINK_SHARED and LINK_EXCL
 * caps.  If it looks like the link count will hit 0, drop any other
 * caps (other than PIN) we don't specifically want (due to the file
 * still being open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}

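/*
 * A worked example of the mask logic above, assuming the file is
 * still open for read such that __ceph_caps_wanted() returns
 * CEPH_CAP_FILE_RD, and i_nlink == 1:
 *
 *	drop  = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
 *	drop |= ~(CEPH_CAP_FILE_RD | CEPH_CAP_PIN);
 *
 * so the returned mask covers every cap except FILE_RD and PIN: the
 * caps we actively want are kept, and PIN survives because we still
 * reference the inode.
 */
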
/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/*
		 * ensure target dentry is invalidated, despite
		 * rehashing bug in vfs_rename_dir
		 */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

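/*
 * Note: this defeats both revalidation paths below.  Setting d_time
 * to the current jiffies makes the time_before(jiffies, dentry->d_time)
 * check in dentry_lease_is_valid() fail, and clearing
 * lease_shared_gen makes dir_lease_is_valid() fail whenever the
 * parent's i_shared_gen is nonzero.
 */
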
/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name,
		     dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid)
		ceph_dentry_lru_touch(dentry);
	else
		d_drop(dentry);
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

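/*
 * Example of the dirstat hack in action (paths illustrative):
 *
 *	$ mount -t ceph mon:/ /mnt/ceph -o dirstat
 *	$ cat /mnt/ceph/somedir
 *	entries: ...
 *	rbytes: ...
 *
 * read(2) on the directory returns the stats block built above;
 * without -o dirstat it fails with EISDIR as usual.
 */
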
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;	/* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};