// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/xattr.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).
 * The exception to this is open_root_dentry(), which will open the
 * mount point by name.
 */

const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
static int ceph_d_init(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
	if (!di)
		return -ENOMEM;          /* oh well */

	di->dentry = dentry;
	di->lease_session = NULL;
	di->time = jiffies;
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
	return 0;
}

/*
 * f_pos encoding for readdir:
 *  - hash order:
 *	(0xff << 52) | ((24 bits hash) << 28) |
 *	(the nth entry among names with the same hash);
 *  - frag+name order:
 *	((frag value) << 28) | (the nth entry in frag);
 */
#define OFFSET_BITS	28
#define OFFSET_MASK	((1 << OFFSET_BITS) - 1)
#define HASH_ORDER	(0xffull << (OFFSET_BITS + 24))
loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
{
	loff_t fpos = ((loff_t)high << OFFSET_BITS) | (loff_t)off;
	if (hash_order)
		fpos |= HASH_ORDER;
	return fpos;
}

static bool is_hash_order(loff_t p)
{
	return (p & HASH_ORDER) == HASH_ORDER;
}

static unsigned fpos_frag(loff_t p)
{
	return p >> OFFSET_BITS;
}

static unsigned fpos_hash(loff_t p)
{
	return ceph_frag_value(fpos_frag(p));
}

static unsigned fpos_off(loff_t p)
{
	return p & OFFSET_MASK;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
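/*
 * Worked example of the encoding above (illustrative values, not from
 * the original source): the 6th entry (n == 5) among names hashing to
 * 0x123456 is encoded as
 *
 *	ceph_make_fpos(0x123456, 5, true)
 *	    == HASH_ORDER | (0x123456ULL << 28) | 5
 *	    == 0xff1234560000005
 *
 * and fpos_hash()/fpos_off() recover 0x123456 and 5 from it.  In
 * frag+name order the high bits hold the frag value instead, and
 * fpos_cmp() orders positions frag-first, then by entry index.
 */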
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len, unsigned next_offset)
{
	char *buf = kmalloc(len+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	kfree(fi->last_name);
	fi->last_name = buf;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	fi->next_offset = next_offset;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}


static struct dentry *
__dcache_find_get_entry(struct dentry *parent, u64 idx,
			struct ceph_readdir_cache_control *cache_ctl)
{
	struct inode *dir = d_inode(parent);
	struct dentry *dentry;
	unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
	loff_t ptr_pos = idx * sizeof(struct dentry *);
	pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;

	if (ptr_pos >= i_size_read(dir))
		return NULL;

	if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
		ceph_readdir_cache_release(cache_ctl);
		cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
		if (!cache_ctl->page) {
			dout(" page %lu not found\n", ptr_pgoff);
			return ERR_PTR(-EAGAIN);
		}
		/* reading/filling the cache are serialized by
		   i_mutex, no need to use page lock */
		unlock_page(cache_ctl->page);
		cache_ctl->dentries = kmap(cache_ctl->page);
	}

	cache_ctl->index = idx & idx_mask;

	rcu_read_lock();
	spin_lock(&parent->d_lock);
	/* check i_size again here, because empty directory can be
	 * marked as complete while not holding the i_mutex. */
	if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
		dentry = cache_ctl->dentries[cache_ctl->index];
	else
		dentry = NULL;
	spin_unlock(&parent->d_lock);
	if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
		dentry = NULL;
	rcu_read_unlock();
	return dentry ? : ERR_PTR(-EAGAIN);
}
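/*
 * Illustrative arithmetic for the readdir cache lookup above (added
 * note, not from the original source): the cache is an array of
 * dentry pointers kept in the dir inode's page cache.  With 4 KiB
 * pages and 8-byte pointers each page holds 512 slots, so for
 * idx == 1000:
 *
 *	ptr_pos   = 1000 * 8 = 8000
 *	ptr_pgoff = 8000 >> PAGE_SHIFT = 1	(second page)
 *	index     = 1000 & 511 = 488		(slot within that page)
 *
 * i_size of the dir inode records how many pointer slots are valid.
 */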
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    int shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = d_inode(parent);
	struct dentry *dentry, *last = NULL;
	struct ceph_dentry_info *di;
	struct ceph_readdir_cache_control cache_ctl = {};
	u64 idx = 0;
	int err = 0;

	dout("__dcache_readdir %p v%u at %llx\n", dir, (unsigned)shared_gen, ctx->pos);

	/* search start position */
	if (ctx->pos > 2) {
		u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
		while (count > 0) {
			u64 step = count >> 1;
			dentry = __dcache_find_get_entry(parent, idx + step,
							 &cache_ctl);
			if (!dentry) {
				/* use linear search */
				idx = 0;
				break;
			}
			if (IS_ERR(dentry)) {
				err = PTR_ERR(dentry);
				goto out;
			}
			di = ceph_dentry(dentry);
			spin_lock(&dentry->d_lock);
			if (fpos_cmp(di->offset, ctx->pos) < 0) {
				idx += step + 1;
				count -= step + 1;
			} else {
				count = step;
			}
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}

		dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
	}


	for (;;) {
		bool emit_dentry = false;
		dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
		if (!dentry) {
			fi->flags |= CEPH_F_ATEND;
			err = 0;
			break;
		}
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out;
		}

		di = ceph_dentry(dentry);
		spin_lock(&dentry->d_lock);
		if (di->lease_shared_gen == shared_gen &&
		    d_really_is_positive(dentry) &&
		    fpos_cmp(ctx->pos, di->offset) <= 0) {
			emit_dentry = true;
		}
		spin_unlock(&dentry->d_lock);

		if (emit_dentry) {
			dout(" %llx dentry %p %pd %p\n", di->offset,
			     dentry, dentry, d_inode(dentry));
			ctx->pos = di->offset;
			if (!dir_emit(ctx, dentry->d_name.name,
				      dentry->d_name.len,
				      ceph_translate_ino(dentry->d_sb,
							 d_inode(dentry)->i_ino),
				      d_inode(dentry)->i_mode >> 12)) {
				dput(dentry);
				err = 0;
				break;
			}
			ctx->pos++;

			if (last)
				dput(last);
			last = dentry;
		} else {
			dput(dentry);
		}
	}
out:
	ceph_readdir_cache_release(&cache_ctl);
	if (last) {
		int ret;
		di = ceph_dentry(last);
		ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
				       fpos_off(di->offset) + 1);
		if (ret < 0)
			err = ret;
		dput(last);
		/* last_name no longer matches the cache index */
		if (fi->readdir_cache_idx >= 0) {
			fi->readdir_cache_idx = -1;
			fi->dir_release_count = 0;
		}
	}
	return err;
}

static bool need_send_readdir(struct ceph_file_info *fi, loff_t pos)
{
	if (!fi->last_readdir)
		return true;
	if (is_hash_order(pos))
		return !ceph_frag_contains_value(fi->frag, fpos_hash(pos));
	else
		return fi->frag != fpos_frag(pos);
}
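/*
 * Hedged example of the hash-order test above (assuming the usual
 * ceph_frag.h encoding; not from the original source): a frag names a
 * prefix of the 24-bit hash space, e.g. a frag with 8 split bits and
 * value 0x420000 contains every hash whose top byte is 0x42.  While
 * the buffered chunk's frag still covers fpos_hash(pos),
 * need_send_readdir() returns false and we keep consuming the chunk;
 * otherwise a new READDIR must be sent for the frag that does cover
 * the hash.
 */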
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	int i;
	int err;
	unsigned frag = -1;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			      ceph_translate_ino(inode->i_sb, inode->i_ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_path.dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			      ceph_translate_ino(inode->i_sb, ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if (ceph_test_mount_opt(fsc, DCACHE) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		int shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}

	/* proceed with a normal readdir */
more:
	/* do we have the correct frag content buffered? */
	if (need_send_readdir(fi, ctx->pos)) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		if (is_hash_order(ctx->pos)) {
			/* fragtree isn't always accurate. choose frag
			 * based on previous reply when possible. */
			if (frag == (unsigned)-1)
				frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
							NULL, NULL);
		} else {
			frag = fpos_frag(ctx->pos);
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		if (op == CEPH_MDS_OP_READDIR) {
			req->r_direct_hash = ceph_frag_value(frag);
			__set_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
			req->r_inode_drop = CEPH_CAP_FILE_EXCL;
		}
		if (fi->last_name) {
			req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
			if (!req->r_path2) {
				ceph_mdsc_put_request(req);
				return -ENOMEM;
			}
		} else if (is_hash_order(ctx->pos)) {
			req->r_args.readdir.offset_hash =
				cpu_to_le32(fpos_hash(ctx->pos));
		}

		req->r_dir_release_cnt = fi->dir_release_count;
		req->r_dir_ordered_cnt = fi->dir_ordered_count;
		req->r_readdir_cache_idx = fi->readdir_cache_idx;
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.flags =
				cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);

		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d on "
		     "frag %x, end=%d, complete=%d, hash_order=%d\n",
		     err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete,
		     (int)req->r_reply_info.hash_order);

		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (!rinfo->hash_order) {
				fi->next_offset = req->r_readdir_offset;
				/* adjust ctx->pos to beginning of frag */
				ctx->pos = ceph_make_fpos(frag,
							  fi->next_offset,
							  false);
			}
		}

		fi->frag = frag;
		fi->last_readdir = req;

		if (test_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags)) {
			fi->readdir_cache_idx = req->r_readdir_cache_idx;
			if (fi->readdir_cache_idx < 0) {
				/* preclude from marking dir ordered */
				fi->dir_ordered_count = 0;
			} else if (ceph_frag_is_leftmost(frag) &&
				   fi->next_offset == 2) {
				/* note dir version at start of readdir so
				 * we can tell if any dentries get dropped */
				fi->dir_release_count = req->r_dir_release_cnt;
				fi->dir_ordered_count = req->r_dir_ordered_cnt;
			}
		} else {
			dout("readdir !did_prepopulate\n");
			/* disable readdir cache */
			fi->readdir_cache_idx = -1;
			/* preclude from marking dir complete */
			fi->dir_release_count = 0;
		}

		/* note next offset and last dentry name */
		if (rinfo->dir_nr > 0) {
			struct ceph_mds_reply_dir_entry *rde =
				rinfo->dir_entries + (rinfo->dir_nr-1);
			unsigned next_offset = req->r_reply_info.dir_end ?
					2 : (fpos_off(rde->offset) + 1);
			err = note_last_dentry(fi, rde->name, rde->name_len,
					       next_offset);
			if (err)
				return err;
		} else if (req->r_reply_info.dir_end) {
			fi->next_offset = 2;
			/* keep last name */
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d pos %llx chunk first %llx\n",
	     fi->frag, rinfo->dir_nr, ctx->pos,
	     rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);

	i = 0;
	/* search start position */
	if (rinfo->dir_nr > 0) {
		int step, nr = rinfo->dir_nr;
		while (nr > 0) {
			step = nr >> 1;
			if (rinfo->dir_entries[i + step].offset < ctx->pos) {
				i += step + 1;
				nr -= step + 1;
			} else {
				nr = step;
			}
		}
	}
	for (; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		ino_t ino;
		u32 ftype;

		BUG_ON(rde->offset < ctx->pos);

		ctx->pos = rde->offset;
		dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
		     i, rinfo->dir_nr, ctx->pos,
		     rde->name_len, rde->name, &rde->inode.in);

		BUG_ON(!rde->inode.in);
		ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);
		ino = ceph_vino_to_ino(vino);

		if (!dir_emit(ctx, rde->name, rde->name_len,
			      ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		ctx->pos++;
	}

	ceph_mdsc_put_request(fi->last_readdir);
	fi->last_readdir = NULL;

	if (fi->next_offset > 2) {
		frag = fi->frag;
		goto more;
	}
	/* more frags? */
	if (!ceph_frag_is_rightmost(fi->frag)) {
		frag = ceph_frag_next(fi->frag);
		if (is_hash_order(ctx->pos)) {
			loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
							fi->next_offset, true);
			if (new_pos > ctx->pos)
				ctx->pos = new_pos;
			/* keep last_name */
		} else {
			ctx->pos = ceph_make_fpos(frag, fi->next_offset, false);
			kfree(fi->last_name);
			fi->last_name = NULL;
		}
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
		spin_lock(&ci->i_ceph_lock);
		if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
			dout(" marking %p complete and ordered\n", inode);
			/* use i_size to track number of entries in
			 * readdir cache */
			BUG_ON(fi->readdir_cache_idx < 0);
			i_size_write(inode, fi->readdir_cache_idx *
				     sizeof(struct dentry*));
		} else {
			dout(" marking %p complete\n", inode);
		}
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
		spin_unlock(&ci->i_ceph_lock);
	}

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}
static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->dir_release_count = 0;
	fi->readdir_cache_idx = -1;
	fi->next_offset = 2;  /* compensate for . and .. */
	fi->flags &= ~CEPH_F_ATEND;
}

/*
 * discard buffered readdir content on seekdir(0), or seek to new frag,
 * or seek prior to current chunk
 */
static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
{
	struct ceph_mds_reply_info_parsed *rinfo;
	loff_t chunk_offset;
	if (new_pos == 0)
		return true;
	if (is_hash_order(new_pos)) {
		/* no need to reset last_name for a forward seek when
		 * dentries are sorted in hash order */
	} else if (fi->frag != fpos_frag(new_pos)) {
		return true;
	}
	rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
	if (!rinfo || !rinfo->dir_nr)
		return true;
	chunk_offset = rinfo->dir_entries[0].offset;
	return new_pos < chunk_offset ||
	       is_hash_order(new_pos) != is_hash_order(chunk_offset);
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t retval;

	inode_lock(inode);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		break;
	case SEEK_END:
		retval = -EOPNOTSUPP;
		/* fall through */
	default:
		goto out;
	}

	if (offset >= 0) {
		if (need_reset_readdir(fi, offset)) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		} else if (is_hash_order(offset) && offset > file->f_pos) {
			/* for hash offset, we don't know if a forward seek
			 * is within same frag */
			fi->dir_release_count = 0;
			fi->readdir_cache_idx = -1;
		}

		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;
	}
out:
	inode_unlock(inode);
	return retval;
}
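/*
 * Illustrative seekdir() behavior for the checks above (added
 * summary, not from the original source): telldir() cookies are the
 * fpos encodings from the top of this file.  seekdir(dir, 0) always
 * resets buffered state, a seek into a different frag drops the
 * buffered chunk and triggers a fresh READDIR, while a seek that
 * stays at or beyond the first entry of the current chunk just moves
 * f_pos and lets ceph_readdir() binary-search within the chunk.
 */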
/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, d_inode(dentry));
			if (d_really_is_positive(dentry)) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);	/* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
	       strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int mask;
	int err;

	dout("lookup %p dentry %p '%pd'\n", dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	/* can we conclude ENOENT locally? */
	if (d_really_is_negative(dentry)) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.getattr.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
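/*
 * Side note on the negative-dentry fast path above (added for
 * clarity, not in the original source): with -o dcache, a complete
 * directory and a held CEPH_CAP_FILE_SHARED let ceph_lookup() answer
 * ENOENT without an MDS round trip.  The dentry is cached negative
 * via d_add(dentry, NULL) and stamped with the current i_shared_gen,
 * which dir_lease_is_valid() later compares to decide whether that
 * cached answer can still be trusted.
 */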
/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, return an
		 * error instead.
		 *
		 * This event should be rare and it happens only when
		 * we talk to an old MDS.  Recent MDSes do not send a
		 * traceless reply for a request that creates a new inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_path2 = kstrdup(dest, GFP_KERNEL);
	if (!req->r_path2) {
		err = -ENOMEM;
		ceph_mdsc_put_request(req);
		goto out;
	}
	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(d_inode(old_dentry));
		d_instantiate(dentry, d_inode(old_dentry));
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
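/*
 * Worked example of the drop mask computed above (illustrative, not
 * from the original source): suppose the victim still has an open
 * reader, so __ceph_caps_wanted() includes the FILE read/cache caps,
 * and i_nlink == 1.  Then
 *
 *	drop = LINK_SHARED | LINK_EXCL | ~(wanted | PIN)
 *
 * i.e. everything except PIN and the caps the open file still wants,
 * so reads from the unlinked-but-open file keep working while the
 * link caps are released to the MDS.
 */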
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry,
		       unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op = CEPH_MDS_OP_RENAME;
	int err;

	if (flags)
		return -EINVAL;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
		if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
			op = CEPH_MDS_OP_RENAMESNAP;
		else
			return -EROFS;
	}
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_parent = new_dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
	if (d_really_is_positive(new_dentry))
		req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */
		d_move(old_dentry, new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	ceph_dentry(dentry)->time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew it if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags,
				 struct inode *dir)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, di->time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/*
				 * We should renew.  If we're in RCU walk mode
				 * though, we can't do that so just return
				 * -ECHILD.
				 */
				if (flags & LOOKUP_RCU) {
					valid = -ECHILD;
				} else {
					session = ceph_get_mds_session(s);
					seq = di->lease_seq;
					di->lease_renew_after = 0;
					di->lease_renew_from = jiffies;
				}
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_shared_gen) == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)atomic_read(&ci->i_shared_gen),
	     dentry, (unsigned)di->lease_shared_gen, valid);
	return valid;
}
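/*
 * Summary of how the two checks above combine (added note, not from
 * the original source): ceph_d_revalidate() below first tries the
 * per-dentry MDS lease (dentry_lease_is_valid), then falls back to
 * the directory-wide FILE_SHARED cap with a matching lease_shared_gen
 * (dir_lease_is_valid).  Only if neither holds does it issue a
 * synchronous LOOKUP to re-verify the dentry.
 */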
/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct dentry *parent;
	struct inode *dir;

	if (flags & LOOKUP_RCU) {
		parent = READ_ONCE(dentry->d_parent);
		dir = d_inode_rcu(parent);
		if (!dir)
			return -ECHILD;
	} else {
		parent = dget_parent(dentry);
		dir = d_inode(parent);
	}

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, d_inode(dentry), ceph_dentry(dentry)->offset);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, d_inode(dentry));
		valid = 1;
	} else if (d_really_is_positive(dentry) &&
		   ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
		valid = 1;
	} else {
		valid = dentry_lease_is_valid(dentry, flags, dir);
		if (valid == -ECHILD)
			return valid;
		if (valid || dir_lease_is_valid(dir, dentry)) {
			if (d_really_is_positive(dentry))
				valid = ceph_is_any_caps(d_inode(dentry));
			else
				valid = 1;
		}
	}

	if (!valid) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(dir->i_sb)->mdsc;
		struct ceph_mds_request *req;
		int op, err;
		u32 mask;

		if (flags & LOOKUP_RCU)
			return -ECHILD;

		op = ceph_snap(dir) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
		req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
		if (!IS_ERR(req)) {
			req->r_dentry = dget(dentry);
			req->r_num_caps = 2;
			req->r_parent = dir;

			mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
			if (ceph_security_xattr_wanted(dir))
				mask |= CEPH_CAP_XATTR_SHARED;
			req->r_args.getattr.mask = cpu_to_le32(mask);

			err = ceph_mdsc_do_request(mdsc, NULL, req);
			switch (err) {
			case 0:
				if (d_really_is_positive(dentry) &&
				    d_inode(dentry) == req->r_target_inode)
					valid = 1;
				break;
			case -ENOENT:
				if (d_really_is_negative(dentry))
					valid = 1;
				/* Fallthrough */
			default:
				break;
			}
			ceph_mdsc_put_request(req);
			dout("d_revalidate %p lookup result=%d\n",
			     dentry, err);
		}
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
	}

	if (!(flags & LOOKUP_RCU))
		dput(parent);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);

	spin_lock(&dentry->d_lock);
	dentry->d_fsdata = NULL;
	spin_unlock(&dentry->d_lock);

	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
}
/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(d_inode(dentry->d_parent));
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
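/*
 * Example of what a read() on a dirstat-mounted directory returns,
 * per the format string above (illustrative numbers; each value is
 * right-aligned in a 20-column field):
 *
 *	entries:                   42
 *	 files:                    30
 *	 subdirs:                  12
 *	rentries:                1234
 *	 rfiles:                 1000
 *	 rsubdirs:                234
 *	rbytes:             104857600
 *	rctime:  1514764800.000000000
 *
 * e.g. via "cat" on a directory of a mount made with -o dirstat.
 */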
/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
	.d_init = ceph_d_init,
};