// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/xattr.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path. Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino). The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct dentry_operations ceph_dentry_ops;

static bool __dentry_lease_is_valid(struct ceph_dentry_info *di);
static int __dir_lease_try_check(const struct dentry *dentry);

/*
 * Initialize ceph dentry state.
 */
static int ceph_d_init(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;

	di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
	if (!di)
		return -ENOMEM;          /* oh well */

	di->dentry = dentry;
	di->lease_session = NULL;
	di->time = jiffies;
	dentry->d_fsdata = di;
	INIT_LIST_HEAD(&di->lease_list);

	atomic64_inc(&mdsc->metric.total_dentries);

	return 0;
}

/*
 * for f_pos for readdir:
 * - hash order:
 *	(0xff << 52) | ((24 bits hash) << 28) |
 *	(the nth entry has hash collision);
 * - frag+name order;
 *	((frag value) << 28) | (the nth entry in frag);
 */
#define OFFSET_BITS	28
#define OFFSET_MASK	((1 << OFFSET_BITS) - 1)
#define HASH_ORDER	(0xffull << (OFFSET_BITS + 24))
loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
{
	loff_t fpos = ((loff_t)high << 28) | (loff_t)off;
	if (hash_order)
		fpos |= HASH_ORDER;
	return fpos;
}

static bool is_hash_order(loff_t p)
{
	return (p & HASH_ORDER) == HASH_ORDER;
}

static unsigned fpos_frag(loff_t p)
{
	return p >> OFFSET_BITS;
}

static unsigned fpos_hash(loff_t p)
{
	return ceph_frag_value(fpos_frag(p));
}

static unsigned fpos_off(loff_t p)
{
	return p & OFFSET_MASK;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}

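/*
 * A worked example of the f_pos encoding above (values chosen purely
 * for illustration):
 *
 * - frag+name order: frag 0x02400000 (i.e. ceph_frag_make(2, 0x400000)),
 *   entry index 5 within the frag:
 *	ceph_make_fpos(0x02400000, 5, false) == 0x0240000000000005
 *	fpos_frag() recovers 0x02400000, fpos_off() recovers 5.
 *
 * - hash order: 24-bit name hash 0xabcdef, entry index 7:
 *	ceph_make_fpos(0xabcdef, 7, true) == 0x0ffabcdef0000007
 *	is_hash_order() is true because bits 52..59 are all set (0xff),
 *	which can never happen for a real frag since a frag's "bits"
 *	field (its top byte) is at most 24; fpos_hash() recovers
 *	0xabcdef and fpos_off() recovers 7.
 */
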
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
			    int len, unsigned next_offset)
{
	char *buf = kmalloc(len+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	kfree(dfi->last_name);
	dfi->last_name = buf;
	memcpy(dfi->last_name, name, len);
	dfi->last_name[len] = 0;
	dfi->next_offset = next_offset;
	dout("note_last_dentry '%s'\n", dfi->last_name);
	return 0;
}


static struct dentry *
__dcache_find_get_entry(struct dentry *parent, u64 idx,
			struct ceph_readdir_cache_control *cache_ctl)
{
	struct inode *dir = d_inode(parent);
	struct dentry *dentry;
	unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
	loff_t ptr_pos = idx * sizeof(struct dentry *);
	pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;

	if (ptr_pos >= i_size_read(dir))
		return NULL;

	if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
		ceph_readdir_cache_release(cache_ctl);
		cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
		if (!cache_ctl->page) {
			dout(" page %lu not found\n", ptr_pgoff);
			return ERR_PTR(-EAGAIN);
		}
		/* reading/filling the cache is serialized by
		   i_mutex, no need to use page lock */
		unlock_page(cache_ctl->page);
		cache_ctl->dentries = kmap(cache_ctl->page);
	}

	cache_ctl->index = idx & idx_mask;

	rcu_read_lock();
	spin_lock(&parent->d_lock);
	/* check i_size again here, because empty directory can be
	 * marked as complete while not holding the i_mutex. */
	if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
		dentry = cache_ctl->dentries[cache_ctl->index];
	else
		dentry = NULL;
	spin_unlock(&parent->d_lock);
	if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
		dentry = NULL;
	rcu_read_unlock();
	return dentry ? : ERR_PTR(-EAGAIN);
}

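/*
 * The readdir cache used above is simply an array of dentry pointers
 * kept in the directory inode's page cache (dir->i_data): each page
 * holds PAGE_SIZE / sizeof(struct dentry *) slots, and i_size on the
 * directory tracks how many slots are currently valid (see
 * ceph_readdir() below, where it is updated).
 */
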
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache. We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir. It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    int shared_gen)
{
	struct ceph_dir_file_info *dfi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = d_inode(parent);
	struct dentry *dentry, *last = NULL;
	struct ceph_dentry_info *di;
	struct ceph_readdir_cache_control cache_ctl = {};
	u64 idx = 0;
	int err = 0;

	dout("__dcache_readdir %p v%u at %llx\n", dir, (unsigned)shared_gen, ctx->pos);

	/* search start position */
	if (ctx->pos > 2) {
		u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
		while (count > 0) {
			u64 step = count >> 1;
			dentry = __dcache_find_get_entry(parent, idx + step,
							 &cache_ctl);
			if (!dentry) {
				/* use linear search */
				idx = 0;
				break;
			}
			if (IS_ERR(dentry)) {
				err = PTR_ERR(dentry);
				goto out;
			}
			di = ceph_dentry(dentry);
			spin_lock(&dentry->d_lock);
			if (fpos_cmp(di->offset, ctx->pos) < 0) {
				idx += step + 1;
				count -= step + 1;
			} else {
				count = step;
			}
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}

		dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
	}


	for (;;) {
		bool emit_dentry = false;
		dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
		if (!dentry) {
			dfi->file_info.flags |= CEPH_F_ATEND;
			err = 0;
			break;
		}
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out;
		}

		spin_lock(&dentry->d_lock);
		di = ceph_dentry(dentry);
		if (d_unhashed(dentry) ||
		    d_really_is_negative(dentry) ||
		    di->lease_shared_gen != shared_gen) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
			err = -EAGAIN;
			goto out;
		}
		if (fpos_cmp(ctx->pos, di->offset) <= 0) {
			__ceph_dentry_dir_lease_touch(di);
			emit_dentry = true;
		}
		spin_unlock(&dentry->d_lock);

		if (emit_dentry) {
			dout(" %llx dentry %p %pd %p\n", di->offset,
			     dentry, dentry, d_inode(dentry));
			ctx->pos = di->offset;
			if (!dir_emit(ctx, dentry->d_name.name,
				      dentry->d_name.len, ceph_present_inode(d_inode(dentry)),
				      d_inode(dentry)->i_mode >> 12)) {
				dput(dentry);
				err = 0;
				break;
			}
			ctx->pos++;

			if (last)
				dput(last);
			last = dentry;
		} else {
			dput(dentry);
		}
	}
out:
	ceph_readdir_cache_release(&cache_ctl);
	if (last) {
		int ret;
		di = ceph_dentry(last);
		ret = note_last_dentry(dfi, last->d_name.name, last->d_name.len,
				       fpos_off(di->offset) + 1);
		if (ret < 0)
			err = ret;
		dput(last);
		/* last_name no longer matches cache index */
		if (dfi->readdir_cache_idx >= 0) {
			dfi->readdir_cache_idx = -1;
			dfi->dir_release_count = 0;
		}
	}
	return err;
}

static bool need_send_readdir(struct ceph_dir_file_info *dfi, loff_t pos)
{
	if (!dfi->last_readdir)
		return true;
	if (is_hash_order(pos))
		return !ceph_frag_contains_value(dfi->frag, fpos_hash(pos));
	else
		return dfi->frag != fpos_frag(pos);
}

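/*
 * ceph_readdir() proper: emit "." and ".." from local state, then try
 * the dcache shortcut above if the directory is known complete and
 * ordered. Otherwise fetch the directory one frag at a time from the
 * MDS, replaying the entries into ctx and (when possible)
 * prepopulating the readdir cache so later passes can use
 * __dcache_readdir().
 */
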
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_dir_file_info *dfi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	int i;
	int err;
	unsigned frag = -1;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
	if (dfi->file_info.flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
	}
	if (ctx->pos == 1) {
		u64 ino;
		struct dentry *dentry = file->f_path.dentry;

		spin_lock(&dentry->d_lock);
		ino = ceph_present_inode(dentry->d_parent->d_inode);
		spin_unlock(&dentry->d_lock);

		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
	}

	spin_lock(&ci->i_ceph_lock);
	/* request Fx cap. if have Fx, we don't need to release Fs cap
	 * for later create/unlink. */
	__ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_WR);
	/* can we use the dcache? */
	if (ceph_test_mount_opt(fsc, DCACHE) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
		int shared_gen = atomic_read(&ci->i_shared_gen);

		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}

	/* proceed with a normal readdir */
more:
	/* do we have the correct frag content buffered? */
	if (need_send_readdir(dfi, ctx->pos)) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (dfi->last_readdir) {
			ceph_mdsc_put_request(dfi->last_readdir);
			dfi->last_readdir = NULL;
		}

		if (is_hash_order(ctx->pos)) {
			/* fragtree isn't always accurate. choose frag
			 * based on previous reply when possible. */
			if (frag == (unsigned)-1)
				frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
							NULL, NULL);
		} else {
			frag = fpos_frag(ctx->pos);
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, dfi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		if (op == CEPH_MDS_OP_READDIR) {
			req->r_direct_hash = ceph_frag_value(frag);
			__set_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
			req->r_inode_drop = CEPH_CAP_FILE_EXCL;
		}
		if (dfi->last_name) {
			req->r_path2 = kstrdup(dfi->last_name, GFP_KERNEL);
			if (!req->r_path2) {
				ceph_mdsc_put_request(req);
				return -ENOMEM;
			}
		} else if (is_hash_order(ctx->pos)) {
			req->r_args.readdir.offset_hash =
				cpu_to_le32(fpos_hash(ctx->pos));
		}

		req->r_dir_release_cnt = dfi->dir_release_count;
		req->r_dir_ordered_cnt = dfi->dir_ordered_count;
		req->r_readdir_cache_idx = dfi->readdir_cache_idx;
		req->r_readdir_offset = dfi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.flags =
				cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);

		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d on "
		     "frag %x, end=%d, complete=%d, hash_order=%d\n",
		     err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete,
		     (int)req->r_reply_info.hash_order);

		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (!rinfo->hash_order) {
				dfi->next_offset = req->r_readdir_offset;
				/* adjust ctx->pos to beginning of frag */
				ctx->pos = ceph_make_fpos(frag,
							  dfi->next_offset,
							  false);
			}
		}

		dfi->frag = frag;
		dfi->last_readdir = req;

		if (test_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags)) {
			dfi->readdir_cache_idx = req->r_readdir_cache_idx;
			if (dfi->readdir_cache_idx < 0) {
				/* preclude from marking dir ordered */
				dfi->dir_ordered_count = 0;
			} else if (ceph_frag_is_leftmost(frag) &&
				   dfi->next_offset == 2) {
				/* note dir version at start of readdir so
				 * we can tell if any dentries get dropped */
				dfi->dir_release_count = req->r_dir_release_cnt;
				dfi->dir_ordered_count = req->r_dir_ordered_cnt;
			}
		} else {
			dout("readdir !did_prepopulate\n");
			/* disable readdir cache */
			dfi->readdir_cache_idx = -1;
			/* preclude from marking dir complete */
			dfi->dir_release_count = 0;
		}

		/* note next offset and last dentry name */
		if (rinfo->dir_nr > 0) {
			struct ceph_mds_reply_dir_entry *rde =
				rinfo->dir_entries + (rinfo->dir_nr-1);
			unsigned next_offset = req->r_reply_info.dir_end ?
					2 : (fpos_off(rde->offset) + 1);
			err = note_last_dentry(dfi, rde->name, rde->name_len,
					       next_offset);
			if (err)
				return err;
		} else if (req->r_reply_info.dir_end) {
			dfi->next_offset = 2;
			/* keep last name */
		}
	}

	rinfo = &dfi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d pos %llx chunk first %llx\n",
	     dfi->frag, rinfo->dir_nr, ctx->pos,
	     rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);

	i = 0;
	/* search start position */
	if (rinfo->dir_nr > 0) {
		int step, nr = rinfo->dir_nr;
		while (nr > 0) {
			step = nr >> 1;
			if (rinfo->dir_entries[i + step].offset < ctx->pos) {
				i += step + 1;
				nr -= step + 1;
			} else {
				nr = step;
			}
		}
	}
	for (; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;

		BUG_ON(rde->offset < ctx->pos);

		ctx->pos = rde->offset;
		dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
		     i, rinfo->dir_nr, ctx->pos,
		     rde->name_len, rde->name, &rde->inode.in);

		BUG_ON(!rde->inode.in);

		if (!dir_emit(ctx, rde->name, rde->name_len,
			      ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)),
			      le32_to_cpu(rde->inode.in->mode) >> 12)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		ctx->pos++;
	}

	ceph_mdsc_put_request(dfi->last_readdir);
	dfi->last_readdir = NULL;

	if (dfi->next_offset > 2) {
		frag = dfi->frag;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(dfi->frag)) {
		frag = ceph_frag_next(dfi->frag);
		if (is_hash_order(ctx->pos)) {
			loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
							dfi->next_offset, true);
			if (new_pos > ctx->pos)
				ctx->pos = new_pos;
			/* keep last_name */
		} else {
			ctx->pos = ceph_make_fpos(frag, dfi->next_offset,
						  false);
			kfree(dfi->last_name);
			dfi->last_name = NULL;
		}
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	dfi->file_info.flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	if (atomic64_read(&ci->i_release_count) ==
			dfi->dir_release_count) {
		spin_lock(&ci->i_ceph_lock);
		if (dfi->dir_ordered_count ==
				atomic64_read(&ci->i_ordered_count)) {
			dout(" marking %p complete and ordered\n", inode);
			/* use i_size to track number of entries in
			 * readdir cache */
			BUG_ON(dfi->readdir_cache_idx < 0);
			i_size_write(inode, dfi->readdir_cache_idx *
				     sizeof(struct dentry*));
		} else {
			dout(" marking %p complete\n", inode);
		}
		__ceph_dir_set_complete(ci, dfi->dir_release_count,
					dfi->dir_ordered_count);
		spin_unlock(&ci->i_ceph_lock);
	}

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

static void reset_readdir(struct ceph_dir_file_info *dfi)
{
	if (dfi->last_readdir) {
		ceph_mdsc_put_request(dfi->last_readdir);
		dfi->last_readdir = NULL;
	}
	kfree(dfi->last_name);
	dfi->last_name = NULL;
	dfi->dir_release_count = 0;
	dfi->readdir_cache_idx = -1;
	dfi->next_offset = 2;  /* compensate for . and .. */
	dfi->file_info.flags &= ~CEPH_F_ATEND;
}

/*
 * discard buffered readdir content on seekdir(0), or seek to new frag,
 * or seek prior to current chunk
 */
static bool need_reset_readdir(struct ceph_dir_file_info *dfi, loff_t new_pos)
{
	struct ceph_mds_reply_info_parsed *rinfo;
	loff_t chunk_offset;
	if (new_pos == 0)
		return true;
	if (is_hash_order(new_pos)) {
		/* no need to reset last_name for a forward seek when
		 * dentries are sorted in hash order */
	} else if (dfi->frag != fpos_frag(new_pos)) {
		return true;
	}
	rinfo = dfi->last_readdir ? &dfi->last_readdir->r_reply_info : NULL;
	if (!rinfo || !rinfo->dir_nr)
		return true;
	chunk_offset = rinfo->dir_entries[0].offset;
	return new_pos < chunk_offset ||
	       is_hash_order(new_pos) != is_hash_order(chunk_offset);
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_dir_file_info *dfi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t retval;

	inode_lock(inode);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
	case SEEK_SET:
		break;
	case SEEK_END:
		retval = -EOPNOTSUPP;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (need_reset_readdir(dfi, offset)) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(dfi);
		} else if (is_hash_order(offset) && offset > file->f_pos) {
			/* for hash offset, we don't know if a forward seek
			 * is within same frag */
			dfi->dir_release_count = 0;
			dfi->readdir_cache_idx = -1;
		}

		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			dfi->file_info.flags &= ~CEPH_F_ATEND;
		}
		retval = offset;
	}
out:
	inode_unlock(inode);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, d_inode(dentry));
			if (d_really_is_positive(dentry)) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int mask;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	/* can we conclude ENOENT locally? */
	if (d_really_is_negative(dentry)) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are 0x%lx\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
			__ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.getattr.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, return an
		 * error instead.
		 *
		 * This event should be rare and it happens only when
		 * we talk to an old MDS.  A recent MDS does not send a
		 * traceless reply for a request that creates a new inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acl_sec_ctx as_ctx = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (ceph_quota_is_max_files_exceeded(dir)) {
		err = -EDQUOT;
		goto out;
	}

	err = ceph_pre_init_acls(dir, &mode, &as_ctx);
	if (err < 0)
		goto out;
	err = ceph_security_init_secctx(dentry, mode, &as_ctx);
	if (err < 0)
		goto out;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (as_ctx.pagelist) {
		req->r_pagelist = as_ctx.pagelist;
		as_ctx.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &as_ctx);
	else
		d_drop(dentry);
	ceph_release_acl_sec_ctx(&as_ctx);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acl_sec_ctx as_ctx = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (ceph_quota_is_max_files_exceeded(dir)) {
		err = -EDQUOT;
		goto out;
	}

	err = ceph_security_init_secctx(dentry, S_IFLNK | 0777, &as_ctx);
	if (err < 0)
		goto out;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_path2 = kstrdup(dest, GFP_KERNEL);
	if (!req->r_path2) {
		err = -ENOMEM;
		ceph_mdsc_put_request(req);
		goto out;
	}
	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (as_ctx.pagelist) {
		req->r_pagelist = as_ctx.pagelist;
		as_ctx.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	ceph_release_acl_sec_ctx(&as_ctx);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acl_sec_ctx as_ctx = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	if (op == CEPH_MDS_OP_MKDIR &&
	    ceph_quota_is_max_files_exceeded(dir)) {
		err = -EDQUOT;
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &as_ctx);
	if (err < 0)
		goto out;
	err = ceph_security_init_secctx(dentry, mode, &as_ctx);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (as_ctx.pagelist) {
		req->r_pagelist = as_ctx.pagelist;
		as_ctx.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &as_ctx);
	else
		d_drop(dentry);
	ceph_release_acl_sec_ctx(&as_ctx);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(d_inode(old_dentry));
		d_instantiate(dentry, d_inode(old_dentry));
	}
	ceph_mdsc_put_request(req);
	return err;
}

static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

	if (result == -EJUKEBOX)
		goto out;

	/* If op failed, mark everyone involved for errors */
	if (result) {
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		/* mark error on parent + clear complete */
		mapping_set_error(req->r_parent->i_mapping, result);
		ceph_dir_clear_complete(req->r_parent);

		/* drop the dentry -- we don't know its status */
		if (!d_unhashed(req->r_dentry))
			d_drop(req->r_dentry);

		/* mark inode itself for an error (since metadata is bogus) */
		mapping_set_error(req->r_old_inode->i_mapping, result);

		pr_warn("ceph: async unlink failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);
	}
out:
	iput(req->r_old_inode);
	ceph_mdsc_release_dir_caps(req);
}

static int get_caps_for_async_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di;
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_UNLINK;

	spin_lock(&ci->i_ceph_lock);
	if ((__ceph_caps_issued(ci, NULL) & want) == want) {
		ceph_take_cap_refs(ci, want, false);
		got = want;
	}
	spin_unlock(&ci->i_ceph_lock);

	/* If we didn't get anything, return 0 */
	if (!got)
		return 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	/*
	 * - We are holding Fx, which implies Fs caps.
	 * - Only support async unlink for primary linkage
	 */
	if (atomic_read(&ci->i_shared_gen) != di->lease_shared_gen ||
	    !(di->flags & CEPH_DENTRY_PRIMARY_LINK))
		want = 0;
	spin_unlock(&dentry->d_lock);

	/* Do we still want what we've got? */
	if (want == got)
		return got;

	ceph_put_cap_refs(ci, got);
	return 0;
}

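/*
 * Async unlink (used by ceph_unlink() below when the ASYNC_DIROPS mount
 * option is set): if we hold Fx|DIR_UNLINK caps on the directory and
 * the dentry is the primary link, the request is submitted without
 * waiting for the MDS reply; we optimistically drop_nlink() and
 * d_delete() right away, and ceph_async_unlink_cb() above cleans up if
 * the MDS later reports a failure.  An -EJUKEBOX reply means the MDS
 * refused the async op, and the unlink is retried synchronously.
 */
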
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
retry:
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_parent = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = ceph_drop_caps_for_unlink(inode);

	if (try_async && op == CEPH_MDS_OP_UNLINK &&
	    (req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) {
		dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir),
		     dentry->d_name.len, dentry->d_name.name,
		     ceph_cap_string(req->r_dir_caps));
		set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
		req->r_callback = ceph_async_unlink_cb;
		req->r_old_inode = d_inode(dentry);
		ihold(req->r_old_inode);
		err = ceph_mdsc_submit_request(mdsc, dir, req);
		if (!err) {
			/*
			 * We have enough caps, so we assume that the unlink
			 * will succeed. Fix up the target inode and dcache.
			 */
			drop_nlink(inode);
			d_delete(dentry);
		} else if (err == -EJUKEBOX) {
			try_async = false;
			ceph_mdsc_put_request(req);
			goto retry;
		}
	} else {
		set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
		err = ceph_mdsc_do_request(mdsc, dir, req);
		if (!err && !req->r_reply_info.head->is_dentry)
			d_delete(dentry);
	}

	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry,
		       unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op = CEPH_MDS_OP_RENAME;
	int err;

	if (flags)
		return -EINVAL;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
		if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
			op = CEPH_MDS_OP_RENAMESNAP;
		else
			return -EROFS;
	} else if (old_dir != new_dir) {
		err = ceph_quota_check_rename(mdsc, d_inode(old_dentry),
					      new_dir);
		if (err)
			return err;
	}

	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_parent = new_dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
	if (d_really_is_positive(new_dentry)) {
		req->r_inode_drop =
			ceph_drop_caps_for_unlink(d_inode(new_dentry));
	}
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */
		d_move(old_dentry, new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

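/*
 * Dentry lease bookkeeping: the MDS client keeps two global lists.
 * mdsc->dentry_leases holds dentries with an explicit per-dentry lease
 * and is kept roughly in expiry order; mdsc->dentry_dir_leases holds
 * dentries that are only covered by their parent directory's
 * CEPH_CAP_FILE_SHARED cap.  ceph_trim_dentries() periodically walks
 * both lists, dropping stale leases and (when cap usage is above
 * caps_use_max) expiring dir leases so the dentries can be pruned.
 */
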
/*
 * Move dentry to tail of mdsc->dentry_leases list when lease is updated.
 * Leases at front of the list will expire first. (Assume all leases have
 * similar duration)
 *
 * Called under dentry->d_lock.
 */
void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
{
	struct dentry *dn = di->dentry;
	struct ceph_mds_client *mdsc;

	dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn);

	di->flags |= CEPH_DENTRY_LEASE_LIST;
	if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
		di->flags |= CEPH_DENTRY_REFERENCED;
		return;
	}

	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_list_lock);
	list_move_tail(&di->lease_list, &mdsc->dentry_leases);
	spin_unlock(&mdsc->dentry_list_lock);
}

static void __dentry_dir_lease_touch(struct ceph_mds_client* mdsc,
				     struct ceph_dentry_info *di)
{
	di->flags &= ~(CEPH_DENTRY_LEASE_LIST | CEPH_DENTRY_REFERENCED);
	di->lease_gen = 0;
	di->time = jiffies;
	list_move_tail(&di->lease_list, &mdsc->dentry_dir_leases);
}

/*
 * When dir lease is used, add dentry to tail of mdsc->dentry_dir_leases
 * list if it's not in the list, otherwise set 'referenced' flag.
 *
 * Called under dentry->d_lock.
 */
void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
{
	struct dentry *dn = di->dentry;
	struct ceph_mds_client *mdsc;

	dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n",
	     di, dn, dn, di->offset);

	if (!list_empty(&di->lease_list)) {
		if (di->flags & CEPH_DENTRY_LEASE_LIST) {
			/* don't remove dentry from dentry lease list
			 * if its lease is valid */
			if (__dentry_lease_is_valid(di))
				return;
		} else {
			di->flags |= CEPH_DENTRY_REFERENCED;
			return;
		}
	}

	if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
		di->flags |= CEPH_DENTRY_REFERENCED;
		di->flags &= ~CEPH_DENTRY_LEASE_LIST;
		return;
	}

	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_list_lock);
	__dentry_dir_lease_touch(mdsc, di);
	spin_unlock(&mdsc->dentry_list_lock);
}

static void __dentry_lease_unlist(struct ceph_dentry_info *di)
{
	struct ceph_mds_client *mdsc;
	if (di->flags & CEPH_DENTRY_SHRINK_LIST)
		return;
	if (list_empty(&di->lease_list))
		return;

	mdsc = ceph_sb_to_client(di->dentry->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_list_lock);
	list_del_init(&di->lease_list);
	spin_unlock(&mdsc->dentry_list_lock);
}

enum {
	KEEP	= 0,
	DELETE	= 1,
	TOUCH	= 2,
	STOP	= 4,
};

struct ceph_lease_walk_control {
	bool dir_lease;
	bool expire_dir_lease;
	unsigned long nr_to_scan;
	unsigned long dir_lease_ttl;
};

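/*
 * __dentry_leases_walk() calls the check() callback for each dentry on
 * the chosen lease list and acts on the flags it returns: KEEP leaves
 * the entry alone, DELETE queues it for disposal (or just unlists it if
 * the dentry is still in use), TOUCH moves it to the tail of the dir
 * lease list, and STOP ends the walk early.
 */
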
static unsigned long
__dentry_leases_walk(struct ceph_mds_client *mdsc,
		     struct ceph_lease_walk_control *lwc,
		     int (*check)(struct dentry*, void*))
{
	struct ceph_dentry_info *di, *tmp;
	struct dentry *dentry, *last = NULL;
	struct list_head* list;
	LIST_HEAD(dispose);
	unsigned long freed = 0;
	int ret = 0;

	list = lwc->dir_lease ? &mdsc->dentry_dir_leases : &mdsc->dentry_leases;
	spin_lock(&mdsc->dentry_list_lock);
	list_for_each_entry_safe(di, tmp, list, lease_list) {
		if (!lwc->nr_to_scan)
			break;
		--lwc->nr_to_scan;

		dentry = di->dentry;
		if (last == dentry)
			break;

		if (!spin_trylock(&dentry->d_lock))
			continue;

		if (__lockref_is_dead(&dentry->d_lockref)) {
			list_del_init(&di->lease_list);
			goto next;
		}

		ret = check(dentry, lwc);
		if (ret & TOUCH) {
			/* move it into tail of dir lease list */
			__dentry_dir_lease_touch(mdsc, di);
			if (!last)
				last = dentry;
		}
		if (ret & DELETE) {
			/* stale lease */
			di->flags &= ~CEPH_DENTRY_REFERENCED;
			if (dentry->d_lockref.count > 0) {
				/* update_dentry_lease() will re-add
				 * it to lease list, or
				 * ceph_d_delete() will return 1 when
				 * last reference is dropped */
				list_del_init(&di->lease_list);
			} else {
				di->flags |= CEPH_DENTRY_SHRINK_LIST;
				list_move_tail(&di->lease_list, &dispose);
				dget_dlock(dentry);
			}
		}
next:
		spin_unlock(&dentry->d_lock);
		if (ret & STOP)
			break;
	}
	spin_unlock(&mdsc->dentry_list_lock);

	while (!list_empty(&dispose)) {
		di = list_first_entry(&dispose, struct ceph_dentry_info,
				      lease_list);
		dentry = di->dentry;
		spin_lock(&dentry->d_lock);

		list_del_init(&di->lease_list);
		di->flags &= ~CEPH_DENTRY_SHRINK_LIST;
		if (di->flags & CEPH_DENTRY_REFERENCED) {
			spin_lock(&mdsc->dentry_list_lock);
			if (di->flags & CEPH_DENTRY_LEASE_LIST) {
				list_add_tail(&di->lease_list,
					      &mdsc->dentry_leases);
			} else {
				__dentry_dir_lease_touch(mdsc, di);
			}
			spin_unlock(&mdsc->dentry_list_lock);
		} else {
			freed++;
		}

		spin_unlock(&dentry->d_lock);
		/* ceph_d_delete() does the trick */
		dput(dentry);
	}
	return freed;
}

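/*
 * check() callback for the plain dentry lease list: stop the walk as
 * soon as a still-valid lease is found (the list is roughly ordered by
 * expiry), keep entries whose parent dir state can't be checked without
 * blocking (-EBUSY), migrate entries still covered by a valid parent
 * dir lease over to the dir lease list, and delete the rest.
 */
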
static int __dentry_lease_check(struct dentry *dentry, void *arg)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int ret;

	if (__dentry_lease_is_valid(di))
		return STOP;
	ret = __dir_lease_try_check(dentry);
	if (ret == -EBUSY)
		return KEEP;
	if (ret > 0)
		return TOUCH;
	return DELETE;
}

static int __dir_lease_check(struct dentry *dentry, void *arg)
{
	struct ceph_lease_walk_control *lwc = arg;
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	int ret = __dir_lease_try_check(dentry);
	if (ret == -EBUSY)
		return KEEP;
	if (ret > 0) {
		if (time_before(jiffies, di->time + lwc->dir_lease_ttl))
			return STOP;
		/* Move dentry to tail of dir lease list if we don't want
		 * to delete it. So dentries in the list are checked in a
		 * round robin manner */
		if (!lwc->expire_dir_lease)
			return TOUCH;
		if (dentry->d_lockref.count > 0 ||
		    (di->flags & CEPH_DENTRY_REFERENCED))
			return TOUCH;
		/* invalidate dir lease */
		di->lease_shared_gen = 0;
	}
	return DELETE;
}

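/*
 * Trim dentry leases: scan a bounded chunk of the plain lease list and
 * then of the dir lease list.  Returns -EAGAIN if the scan budget ran
 * out (more entries remain to check), 1 if anything was freed, else 0.
 * Dir leases are only aggressively expired when cap usage exceeds
 * mdsc->caps_use_max and the first pass did not free enough.
 */
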
int ceph_trim_dentries(struct ceph_mds_client *mdsc)
{
	struct ceph_lease_walk_control lwc;
	unsigned long count;
	unsigned long freed;

	spin_lock(&mdsc->caps_list_lock);
	if (mdsc->caps_use_max > 0 &&
	    mdsc->caps_use_count > mdsc->caps_use_max)
		count = mdsc->caps_use_count - mdsc->caps_use_max;
	else
		count = 0;
	spin_unlock(&mdsc->caps_list_lock);

	lwc.dir_lease = false;
	lwc.nr_to_scan = CEPH_CAPS_PER_RELEASE * 2;
	freed = __dentry_leases_walk(mdsc, &lwc, __dentry_lease_check);
	if (!lwc.nr_to_scan) /* more invalid leases */
		return -EAGAIN;

	if (lwc.nr_to_scan < CEPH_CAPS_PER_RELEASE)
		lwc.nr_to_scan = CEPH_CAPS_PER_RELEASE;

	lwc.dir_lease = true;
	lwc.expire_dir_lease = freed < count;
	lwc.dir_lease_ttl = mdsc->fsc->mount_options->caps_wanted_delay_max * HZ;
	freed += __dentry_leases_walk(mdsc, &lwc, __dir_lease_check);
	if (!lwc.nr_to_scan) /* more to check */
		return -EAGAIN;

	return freed > 0 ? 1 : 0;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	spin_lock(&dentry->d_lock);
	di->time = jiffies;
	di->lease_shared_gen = 0;
	di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
	__dentry_lease_unlist(di);
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static bool __dentry_lease_is_valid(struct ceph_dentry_info *di)
{
	struct ceph_mds_session *session;

	if (!di->lease_gen)
		return false;

	session = di->lease_session;
	if (session) {
		u32 gen;
		unsigned long ttl;

		spin_lock(&session->s_gen_ttl_lock);
		gen = session->s_cap_gen;
		ttl = session->s_cap_ttl;
		spin_unlock(&session->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, ttl) &&
		    time_before(jiffies, di->time))
			return true;
	}
	di->lease_gen = 0;
	return false;
}

static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session = NULL;
	u32 seq = 0;
	int valid = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && __dentry_lease_is_valid(di)) {
		valid = 1;

		if (di->lease_renew_after &&
		    time_after(jiffies, di->lease_renew_after)) {
			/*
			 * We should renew. If we're in RCU walk mode
			 * though, we can't do that so just return
			 * -ECHILD.
			 */
			if (flags & LOOKUP_RCU) {
				valid = -ECHILD;
			} else {
				session = ceph_get_mds_session(di->lease_session);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Called under dentry->d_lock.
 */
static int __dir_lease_try_check(const struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct inode *dir;
	struct ceph_inode_info *ci;
	int valid = 0;

	if (!di->lease_shared_gen)
		return 0;
	if (IS_ROOT(dentry))
		return 0;

	dir = d_inode(dentry->d_parent);
	ci = ceph_inode(dir);

	if (spin_trylock(&ci->i_ceph_lock)) {
		if (atomic_read(&ci->i_shared_gen) == di->lease_shared_gen &&
		    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 0))
			valid = 1;
		spin_unlock(&ci->i_ceph_lock);
	} else {
		valid = -EBUSY;
	}

	if (!valid)
		di->lease_shared_gen = 0;
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
			      struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	int valid;
	int shared_gen;

	spin_lock(&ci->i_ceph_lock);
	valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	if (valid) {
		__ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
		shared_gen = atomic_read(&ci->i_shared_gen);
	}
	spin_unlock(&ci->i_ceph_lock);
	if (valid) {
		struct ceph_dentry_info *di;
		spin_lock(&dentry->d_lock);
		di = ceph_dentry(dentry);
		if (dir == d_inode(dentry->d_parent) &&
		    di && di->lease_shared_gen == shared_gen)
			__ceph_dentry_dir_lease_touch(di);
		else
			valid = 0;
		spin_unlock(&dentry->d_lock);
	}
	dout("dir_lease_is_valid dir %p v%u dentry %p = %d\n",
	     dir, (unsigned)atomic_read(&ci->i_shared_gen), dentry, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct dentry *parent;
	struct inode *dir, *inode;
	struct ceph_mds_client *mdsc;

	if (flags & LOOKUP_RCU) {
		parent = READ_ONCE(dentry->d_parent);
		dir = d_inode_rcu(parent);
		if (!dir)
			return -ECHILD;
		inode = d_inode_rcu(dentry);
	} else {
		parent = dget_parent(dentry);
		dir = d_inode(parent);
		inode = d_inode(dentry);
	}

	dout("d_revalidate %p '%pd' inode %p offset 0x%llx\n", dentry,
	     dentry, inode, ceph_dentry(dentry)->offset);

	mdsc = ceph_sb_to_client(dir->i_sb)->mdsc;

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, inode);
		valid = 1;
	} else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else {
		valid = dentry_lease_is_valid(dentry, flags);
		if (valid == -ECHILD)
			return valid;
		if (valid || dir_lease_is_valid(dir, dentry, mdsc)) {
			if (inode)
				valid = ceph_is_any_caps(inode);
			else
				valid = 1;
		}
	}

	if (!valid) {
		struct ceph_mds_request *req;
		int op, err;
		u32 mask;

		if (flags & LOOKUP_RCU)
			return -ECHILD;

		percpu_counter_inc(&mdsc->metric.d_lease_mis);

		op = ceph_snap(dir) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
		req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
		if (!IS_ERR(req)) {
			req->r_dentry = dget(dentry);
			req->r_num_caps = 2;
			req->r_parent = dir;

			mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
			if (ceph_security_xattr_wanted(dir))
				mask |= CEPH_CAP_XATTR_SHARED;
			req->r_args.getattr.mask = cpu_to_le32(mask);

			err = ceph_mdsc_do_request(mdsc, NULL, req);
			switch (err) {
			case 0:
				if (d_really_is_positive(dentry) &&
				    d_inode(dentry) == req->r_target_inode)
					valid = 1;
				break;
			case -ENOENT:
				if (d_really_is_negative(dentry))
					valid = 1;
				fallthrough;
			default:
				break;
			}
			ceph_mdsc_put_request(req);
			dout("d_revalidate %p lookup result=%d\n",
			     dentry, err);
		}
	} else {
		percpu_counter_inc(&mdsc->metric.d_lease_hit);
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (!valid)
		ceph_dir_clear_complete(dir);

	if (!(flags & LOOKUP_RCU))
		dput(parent);
	return valid;
}

/*
 * Delete unused dentry that doesn't have valid lease
 *
 * Called under dentry->d_lock.
 */
static int ceph_d_delete(const struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	/* won't release caps */
	if (d_really_is_negative(dentry))
		return 0;
	if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP)
		return 0;
	/* valid lease? */
	di = ceph_dentry(dentry);
	if (di) {
		if (__dentry_lease_is_valid(di))
			return 0;
		if (__dir_lease_try_check(dentry))
			return 0;
	}
	return 1;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);

	dout("d_release %p\n", dentry);

	atomic64_dec(&fsc->mdsc->metric.total_dentries);

	spin_lock(&dentry->d_lock);
	__dentry_lease_unlist(di);
	dentry->d_fsdata = NULL;
	spin_unlock(&dentry->d_lock);

	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	struct ceph_inode_info *dir_ci;
	struct ceph_dentry_info *di;

	dout("ceph_d_prune %pd %p\n", dentry, dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* we hold d_lock, so d_parent is stable */
	dir_ci = ceph_inode(d_inode(dentry->d_parent));
	if (dir_ci->i_vino.snap == CEPH_SNAPDIR)
		return;

	/* who calls d_delete() should also disable dcache readdir */
	if (d_really_is_negative(dentry))
		return;

	/* d_fsdata does not get cleared until d_release */
	if (!d_unhashed(dentry)) {
		__ceph_dir_clear_complete(dir_ci);
		return;
	}

	/* Disable dcache readdir just in case that someone called d_drop()
	 * or d_invalidate(), but MDS didn't revoke CEPH_CAP_FILE_SHARED
	 * properly (dcache readdir is still enabled) */
	di = ceph_dentry(dentry);
	if (di->offset > 0 &&
	    di->lease_shared_gen == atomic_read(&dir_ci->i_shared_gen))
		__ceph_dir_clear_ordered(dir_ci);
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_dir_file_info *dfi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!dfi->dir_info) {
		dfi->dir_info = kmalloc(bufsize, GFP_KERNEL);
		if (!dfi->dir_info)
			return -ENOMEM;
		dfi->dir_info_len =
			snprintf(dfi->dir_info, bufsize,
				 "entries: %20lld\n"
				 " files: %20lld\n"
				 " subdirs: %20lld\n"
				 "rentries: %20lld\n"
				 " rfiles: %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes: %20lld\n"
				 "rctime: %10lld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 ci->i_rctime.tv_sec,
				 ci->i_rctime.tv_nsec);
	}

	if (*ppos >= dfi->dir_info_len)
		return 0;
	size = min_t(unsigned, size, dfi->dir_info_len-*ppos);
	left = copy_to_user(buf, dfi->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}


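/*
 * For example (illustrative only), on a cephfs mount made with
 * '-o dirstat', reading a directory as if it were a regular file:
 *
 *	$ cat /mnt/cephfs/some/dir
 *	entries: ...
 *	rentries: ...
 *	rbytes: ...
 *	rctime: ...
 *
 * returns the recursive statistics formatted above instead of -EISDIR.
 */
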
/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);
	unsigned hash;

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		spin_lock(&dn->d_lock);
		hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
		spin_unlock(&dn->d_lock);
		return hash;
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_delete = ceph_d_delete,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
	.d_init = ceph_d_init,
};