1 #include "ceph_debug.h" 2 3 #include <linux/spinlock.h> 4 #include <linux/fs_struct.h> 5 #include <linux/namei.h> 6 #include <linux/sched.h> 7 8 #include "super.h" 9 10 /* 11 * Directory operations: readdir, lookup, create, link, unlink, 12 * rename, etc. 13 */ 14 15 /* 16 * Ceph MDS operations are specified in terms of a base ino and 17 * relative path. Thus, the client can specify an operation on a 18 * specific inode (e.g., a getattr due to fstat(2)), or as a path 19 * relative to, say, the root directory. 20 * 21 * Normally, we limit ourselves to strict inode ops (no path component) 22 * or dentry operations (a single path component relative to an ino). The 23 * exception to this is open_root_dentry(), which will open the mount 24 * point by name. 25 */ 26 27 const struct inode_operations ceph_dir_iops; 28 const struct file_operations ceph_dir_fops; 29 struct dentry_operations ceph_dentry_ops; 30 31 /* 32 * Initialize ceph dentry state. 33 */ 34 int ceph_init_dentry(struct dentry *dentry) 35 { 36 struct ceph_dentry_info *di; 37 38 if (dentry->d_fsdata) 39 return 0; 40 41 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) 42 dentry->d_op = &ceph_dentry_ops; 43 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR) 44 dentry->d_op = &ceph_snapdir_dentry_ops; 45 else 46 dentry->d_op = &ceph_snap_dentry_ops; 47 48 di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS); 49 if (!di) 50 return -ENOMEM; /* oh well */ 51 52 spin_lock(&dentry->d_lock); 53 if (dentry->d_fsdata) /* lost a race */ 54 goto out_unlock; 55 di->dentry = dentry; 56 di->lease_session = NULL; 57 dentry->d_fsdata = di; 58 dentry->d_time = jiffies; 59 ceph_dentry_lru_add(dentry); 60 out_unlock: 61 spin_unlock(&dentry->d_lock); 62 return 0; 63 } 64 65 66 67 /* 68 * for readdir, we encode the directory frag and offset within that 69 * frag into f_pos. 70 */ 71 static unsigned fpos_frag(loff_t p) 72 { 73 return p >> 32; 74 } 75 static unsigned fpos_off(loff_t p) 76 { 77 return p & 0xffffffff; 78 } 79 80 /* 81 * When possible, we try to satisfy a readdir by peeking at the 82 * dcache. We make this work by carefully ordering dentries on 83 * d_u.d_child when we initially get results back from the MDS, and 84 * falling back to a "normal" sync readdir if any dentries in the dir 85 * are dropped. 86 * 87 * I_COMPLETE tells indicates we have all dentries in the dir. It is 88 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by 89 * the MDS if/when the directory is modified). 90 */ 91 static int __dcache_readdir(struct file *filp, 92 void *dirent, filldir_t filldir) 93 { 94 struct inode *inode = filp->f_dentry->d_inode; 95 struct ceph_file_info *fi = filp->private_data; 96 struct dentry *parent = filp->f_dentry; 97 struct inode *dir = parent->d_inode; 98 struct list_head *p; 99 struct dentry *dentry, *last; 100 struct ceph_dentry_info *di; 101 int err = 0; 102 103 /* claim ref on last dentry we returned */ 104 last = fi->dentry; 105 fi->dentry = NULL; 106 107 dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos, 108 last); 109 110 spin_lock(&dcache_lock); 111 112 /* start at beginning? */ 113 if (filp->f_pos == 2 || (last && 114 filp->f_pos < ceph_dentry(last)->offset)) { 115 if (list_empty(&parent->d_subdirs)) 116 goto out_unlock; 117 p = parent->d_subdirs.prev; 118 dout(" initial p %p/%p\n", p->prev, p->next); 119 } else { 120 p = last->d_u.d_child.prev; 121 } 122 123 more: 124 dentry = list_entry(p, struct dentry, d_u.d_child); 125 di = ceph_dentry(dentry); 126 while (1) { 127 dout(" p %p/%p d_subdirs %p/%p\n", p->prev, p->next, 128 parent->d_subdirs.prev, parent->d_subdirs.next); 129 if (p == &parent->d_subdirs) { 130 fi->at_end = 1; 131 goto out_unlock; 132 } 133 if (!d_unhashed(dentry) && dentry->d_inode && 134 filp->f_pos <= di->offset) 135 break; 136 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry, 137 dentry->d_name.len, dentry->d_name.name, di->offset, 138 filp->f_pos, d_unhashed(dentry) ? " unhashed" : "", 139 !dentry->d_inode ? " null" : ""); 140 p = p->prev; 141 dentry = list_entry(p, struct dentry, d_u.d_child); 142 di = ceph_dentry(dentry); 143 } 144 145 atomic_inc(&dentry->d_count); 146 spin_unlock(&dcache_lock); 147 spin_unlock(&inode->i_lock); 148 149 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, 150 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); 151 filp->f_pos = di->offset; 152 err = filldir(dirent, dentry->d_name.name, 153 dentry->d_name.len, di->offset, 154 dentry->d_inode->i_ino, 155 dentry->d_inode->i_mode >> 12); 156 157 if (last) { 158 if (err < 0) { 159 /* remember our position */ 160 fi->dentry = last; 161 fi->next_offset = di->offset; 162 } else { 163 dput(last); 164 } 165 last = NULL; 166 } 167 168 spin_lock(&inode->i_lock); 169 spin_lock(&dcache_lock); 170 171 if (err < 0) 172 goto out_unlock; 173 174 last = dentry; 175 176 p = p->prev; 177 filp->f_pos++; 178 179 /* make sure a dentry wasn't dropped while we didn't have dcache_lock */ 180 if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE)) 181 goto more; 182 dout(" lost I_COMPLETE on %p; falling back to mds\n", dir); 183 err = -EAGAIN; 184 185 out_unlock: 186 spin_unlock(&dcache_lock); 187 188 if (last) { 189 spin_unlock(&inode->i_lock); 190 dput(last); 191 spin_lock(&inode->i_lock); 192 } 193 194 return err; 195 } 196 197 /* 198 * make note of the last dentry we read, so we can 199 * continue at the same lexicographical point, 200 * regardless of what dir changes take place on the 201 * server. 202 */ 203 static int note_last_dentry(struct ceph_file_info *fi, const char *name, 204 int len) 205 { 206 kfree(fi->last_name); 207 fi->last_name = kmalloc(len+1, GFP_NOFS); 208 if (!fi->last_name) 209 return -ENOMEM; 210 memcpy(fi->last_name, name, len); 211 fi->last_name[len] = 0; 212 dout("note_last_dentry '%s'\n", fi->last_name); 213 return 0; 214 } 215 216 static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) 217 { 218 struct ceph_file_info *fi = filp->private_data; 219 struct inode *inode = filp->f_dentry->d_inode; 220 struct ceph_inode_info *ci = ceph_inode(inode); 221 struct ceph_client *client = ceph_inode_to_client(inode); 222 struct ceph_mds_client *mdsc = &client->mdsc; 223 unsigned frag = fpos_frag(filp->f_pos); 224 int off = fpos_off(filp->f_pos); 225 int err; 226 u32 ftype; 227 struct ceph_mds_reply_info_parsed *rinfo; 228 const int max_entries = client->mount_args->max_readdir; 229 230 dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off); 231 if (fi->at_end) 232 return 0; 233 234 /* always start with . and .. */ 235 if (filp->f_pos == 0) { 236 /* note dir version at start of readdir so we can tell 237 * if any dentries get dropped */ 238 fi->dir_release_count = ci->i_release_count; 239 240 dout("readdir off 0 -> '.'\n"); 241 if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0), 242 inode->i_ino, inode->i_mode >> 12) < 0) 243 return 0; 244 filp->f_pos = 1; 245 off = 1; 246 } 247 if (filp->f_pos == 1) { 248 dout("readdir off 1 -> '..'\n"); 249 if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1), 250 filp->f_dentry->d_parent->d_inode->i_ino, 251 inode->i_mode >> 12) < 0) 252 return 0; 253 filp->f_pos = 2; 254 off = 2; 255 } 256 257 /* can we use the dcache? */ 258 spin_lock(&inode->i_lock); 259 if ((filp->f_pos == 2 || fi->dentry) && 260 !ceph_test_opt(client, NOASYNCREADDIR) && 261 (ci->i_ceph_flags & CEPH_I_COMPLETE) && 262 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { 263 err = __dcache_readdir(filp, dirent, filldir); 264 if (err != -EAGAIN) { 265 spin_unlock(&inode->i_lock); 266 return err; 267 } 268 } 269 spin_unlock(&inode->i_lock); 270 if (fi->dentry) { 271 err = note_last_dentry(fi, fi->dentry->d_name.name, 272 fi->dentry->d_name.len); 273 if (err) 274 return err; 275 dput(fi->dentry); 276 fi->dentry = NULL; 277 } 278 279 /* proceed with a normal readdir */ 280 281 more: 282 /* do we have the correct frag content buffered? */ 283 if (fi->frag != frag || fi->last_readdir == NULL) { 284 struct ceph_mds_request *req; 285 int op = ceph_snap(inode) == CEPH_SNAPDIR ? 286 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR; 287 288 /* discard old result, if any */ 289 if (fi->last_readdir) 290 ceph_mdsc_put_request(fi->last_readdir); 291 292 /* requery frag tree, as the frag topology may have changed */ 293 frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL); 294 295 dout("readdir fetching %llx.%llx frag %x offset '%s'\n", 296 ceph_vinop(inode), frag, fi->last_name); 297 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 298 if (IS_ERR(req)) 299 return PTR_ERR(req); 300 req->r_inode = igrab(inode); 301 req->r_dentry = dget(filp->f_dentry); 302 /* hints to request -> mds selection code */ 303 req->r_direct_mode = USE_AUTH_MDS; 304 req->r_direct_hash = ceph_frag_value(frag); 305 req->r_direct_is_hash = true; 306 req->r_path2 = kstrdup(fi->last_name, GFP_NOFS); 307 req->r_readdir_offset = fi->next_offset; 308 req->r_args.readdir.frag = cpu_to_le32(frag); 309 req->r_args.readdir.max_entries = cpu_to_le32(max_entries); 310 req->r_num_caps = max_entries; 311 err = ceph_mdsc_do_request(mdsc, NULL, req); 312 if (err < 0) { 313 ceph_mdsc_put_request(req); 314 return err; 315 } 316 dout("readdir got and parsed readdir result=%d" 317 " on frag %x, end=%d, complete=%d\n", err, frag, 318 (int)req->r_reply_info.dir_end, 319 (int)req->r_reply_info.dir_complete); 320 321 if (!req->r_did_prepopulate) { 322 dout("readdir !did_prepopulate"); 323 fi->dir_release_count--; /* preclude I_COMPLETE */ 324 } 325 326 /* note next offset and last dentry name */ 327 fi->offset = fi->next_offset; 328 fi->last_readdir = req; 329 330 if (req->r_reply_info.dir_end) { 331 kfree(fi->last_name); 332 fi->last_name = NULL; 333 fi->next_offset = 0; 334 } else { 335 rinfo = &req->r_reply_info; 336 err = note_last_dentry(fi, 337 rinfo->dir_dname[rinfo->dir_nr-1], 338 rinfo->dir_dname_len[rinfo->dir_nr-1]); 339 if (err) 340 return err; 341 fi->next_offset += rinfo->dir_nr; 342 } 343 } 344 345 rinfo = &fi->last_readdir->r_reply_info; 346 dout("readdir frag %x num %d off %d chunkoff %d\n", frag, 347 rinfo->dir_nr, off, fi->offset); 348 while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) { 349 u64 pos = ceph_make_fpos(frag, off); 350 struct ceph_mds_reply_inode *in = 351 rinfo->dir_in[off - fi->offset].in; 352 dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n", 353 off, off - fi->offset, rinfo->dir_nr, pos, 354 rinfo->dir_dname_len[off - fi->offset], 355 rinfo->dir_dname[off - fi->offset], in); 356 BUG_ON(!in); 357 ftype = le32_to_cpu(in->mode) >> 12; 358 if (filldir(dirent, 359 rinfo->dir_dname[off - fi->offset], 360 rinfo->dir_dname_len[off - fi->offset], 361 pos, 362 le64_to_cpu(in->ino), 363 ftype) < 0) { 364 dout("filldir stopping us...\n"); 365 return 0; 366 } 367 off++; 368 filp->f_pos = pos + 1; 369 } 370 371 if (fi->last_name) { 372 ceph_mdsc_put_request(fi->last_readdir); 373 fi->last_readdir = NULL; 374 goto more; 375 } 376 377 /* more frags? */ 378 if (!ceph_frag_is_rightmost(frag)) { 379 frag = ceph_frag_next(frag); 380 off = 0; 381 filp->f_pos = ceph_make_fpos(frag, off); 382 dout("readdir next frag is %x\n", frag); 383 goto more; 384 } 385 fi->at_end = 1; 386 387 /* 388 * if dir_release_count still matches the dir, no dentries 389 * were released during the whole readdir, and we should have 390 * the complete dir contents in our cache. 391 */ 392 spin_lock(&inode->i_lock); 393 if (ci->i_release_count == fi->dir_release_count) { 394 dout(" marking %p complete\n", inode); 395 ci->i_ceph_flags |= CEPH_I_COMPLETE; 396 ci->i_max_offset = filp->f_pos; 397 } 398 spin_unlock(&inode->i_lock); 399 400 dout("readdir %p filp %p done.\n", inode, filp); 401 return 0; 402 } 403 404 static void reset_readdir(struct ceph_file_info *fi) 405 { 406 if (fi->last_readdir) { 407 ceph_mdsc_put_request(fi->last_readdir); 408 fi->last_readdir = NULL; 409 } 410 kfree(fi->last_name); 411 fi->next_offset = 2; /* compensate for . and .. */ 412 if (fi->dentry) { 413 dput(fi->dentry); 414 fi->dentry = NULL; 415 } 416 fi->at_end = 0; 417 } 418 419 static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin) 420 { 421 struct ceph_file_info *fi = file->private_data; 422 struct inode *inode = file->f_mapping->host; 423 loff_t old_offset = offset; 424 loff_t retval; 425 426 mutex_lock(&inode->i_mutex); 427 switch (origin) { 428 case SEEK_END: 429 offset += inode->i_size + 2; /* FIXME */ 430 break; 431 case SEEK_CUR: 432 offset += file->f_pos; 433 } 434 retval = -EINVAL; 435 if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) { 436 if (offset != file->f_pos) { 437 file->f_pos = offset; 438 file->f_version = 0; 439 fi->at_end = 0; 440 } 441 retval = offset; 442 443 /* 444 * discard buffered readdir content on seekdir(0), or 445 * seek to new frag, or seek prior to current chunk. 446 */ 447 if (offset == 0 || 448 fpos_frag(offset) != fpos_frag(old_offset) || 449 fpos_off(offset) < fi->offset) { 450 dout("dir_llseek dropping %p content\n", file); 451 reset_readdir(fi); 452 } 453 454 /* bump dir_release_count if we did a forward seek */ 455 if (offset > old_offset) 456 fi->dir_release_count--; 457 } 458 mutex_unlock(&inode->i_mutex); 459 return retval; 460 } 461 462 /* 463 * Process result of a lookup/open request. 464 * 465 * Mainly, make sure we return the final req->r_dentry (if it already 466 * existed) in place of the original VFS-provided dentry when they 467 * differ. 468 * 469 * Gracefully handle the case where the MDS replies with -ENOENT and 470 * no trace (which it may do, at its discretion, e.g., if it doesn't 471 * care to issue a lease on the negative dentry). 472 */ 473 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, 474 struct dentry *dentry, int err) 475 { 476 struct ceph_client *client = ceph_client(dentry->d_sb); 477 struct inode *parent = dentry->d_parent->d_inode; 478 479 /* .snap dir? */ 480 if (err == -ENOENT && 481 ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */ 482 strcmp(dentry->d_name.name, 483 client->mount_args->snapdir_name) == 0) { 484 struct inode *inode = ceph_get_snapdir(parent); 485 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", 486 dentry, dentry->d_name.len, dentry->d_name.name, inode); 487 d_add(dentry, inode); 488 err = 0; 489 } 490 491 if (err == -ENOENT) { 492 /* no trace? */ 493 err = 0; 494 if (!req->r_reply_info.head->is_dentry) { 495 dout("ENOENT and no trace, dentry %p inode %p\n", 496 dentry, dentry->d_inode); 497 if (dentry->d_inode) { 498 d_drop(dentry); 499 err = -ENOENT; 500 } else { 501 d_add(dentry, NULL); 502 } 503 } 504 } 505 if (err) 506 dentry = ERR_PTR(err); 507 else if (dentry != req->r_dentry) 508 dentry = dget(req->r_dentry); /* we got spliced */ 509 else 510 dentry = NULL; 511 return dentry; 512 } 513 514 /* 515 * Look up a single dir entry. If there is a lookup intent, inform 516 * the MDS so that it gets our 'caps wanted' value in a single op. 517 */ 518 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, 519 struct nameidata *nd) 520 { 521 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 522 struct ceph_mds_client *mdsc = &client->mdsc; 523 struct ceph_mds_request *req; 524 int op; 525 int err; 526 527 dout("lookup %p dentry %p '%.*s'\n", 528 dir, dentry, dentry->d_name.len, dentry->d_name.name); 529 530 if (dentry->d_name.len > NAME_MAX) 531 return ERR_PTR(-ENAMETOOLONG); 532 533 err = ceph_init_dentry(dentry); 534 if (err < 0) 535 return ERR_PTR(err); 536 537 /* open (but not create!) intent? */ 538 if (nd && 539 (nd->flags & LOOKUP_OPEN) && 540 (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */ 541 !(nd->intent.open.flags & O_CREAT)) { 542 int mode = nd->intent.open.create_mode & ~current->fs->umask; 543 return ceph_lookup_open(dir, dentry, nd, mode, 1); 544 } 545 546 /* can we conclude ENOENT locally? */ 547 if (dentry->d_inode == NULL) { 548 struct ceph_inode_info *ci = ceph_inode(dir); 549 struct ceph_dentry_info *di = ceph_dentry(dentry); 550 551 spin_lock(&dir->i_lock); 552 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags); 553 if (strncmp(dentry->d_name.name, 554 client->mount_args->snapdir_name, 555 dentry->d_name.len) && 556 (ci->i_ceph_flags & CEPH_I_COMPLETE) && 557 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { 558 di->offset = ci->i_max_offset++; 559 spin_unlock(&dir->i_lock); 560 dout(" dir %p complete, -ENOENT\n", dir); 561 d_add(dentry, NULL); 562 di->lease_shared_gen = ci->i_shared_gen; 563 return NULL; 564 } 565 spin_unlock(&dir->i_lock); 566 } 567 568 op = ceph_snap(dir) == CEPH_SNAPDIR ? 569 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP; 570 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS); 571 if (IS_ERR(req)) 572 return ERR_PTR(PTR_ERR(req)); 573 req->r_dentry = dget(dentry); 574 req->r_num_caps = 2; 575 /* we only need inode linkage */ 576 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); 577 req->r_locked_dir = dir; 578 err = ceph_mdsc_do_request(mdsc, NULL, req); 579 dentry = ceph_finish_lookup(req, dentry, err); 580 ceph_mdsc_put_request(req); /* will dput(dentry) */ 581 dout("lookup result=%p\n", dentry); 582 return dentry; 583 } 584 585 /* 586 * If we do a create but get no trace back from the MDS, follow up with 587 * a lookup (the VFS expects us to link up the provided dentry). 588 */ 589 int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry) 590 { 591 struct dentry *result = ceph_lookup(dir, dentry, NULL); 592 593 if (result && !IS_ERR(result)) { 594 /* 595 * We created the item, then did a lookup, and found 596 * it was already linked to another inode we already 597 * had in our cache (and thus got spliced). Link our 598 * dentry to that inode, but don't hash it, just in 599 * case the VFS wants to dereference it. 600 */ 601 BUG_ON(!result->d_inode); 602 d_instantiate(dentry, result->d_inode); 603 return 0; 604 } 605 return PTR_ERR(result); 606 } 607 608 static int ceph_mknod(struct inode *dir, struct dentry *dentry, 609 int mode, dev_t rdev) 610 { 611 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 612 struct ceph_mds_client *mdsc = &client->mdsc; 613 struct ceph_mds_request *req; 614 int err; 615 616 if (ceph_snap(dir) != CEPH_NOSNAP) 617 return -EROFS; 618 619 dout("mknod in dir %p dentry %p mode 0%o rdev %d\n", 620 dir, dentry, mode, rdev); 621 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS); 622 if (IS_ERR(req)) { 623 d_drop(dentry); 624 return PTR_ERR(req); 625 } 626 req->r_dentry = dget(dentry); 627 req->r_num_caps = 2; 628 req->r_locked_dir = dir; 629 req->r_args.mknod.mode = cpu_to_le32(mode); 630 req->r_args.mknod.rdev = cpu_to_le32(rdev); 631 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 632 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 633 err = ceph_mdsc_do_request(mdsc, dir, req); 634 if (!err && !req->r_reply_info.head->is_dentry) 635 err = ceph_handle_notrace_create(dir, dentry); 636 ceph_mdsc_put_request(req); 637 if (err) 638 d_drop(dentry); 639 return err; 640 } 641 642 static int ceph_create(struct inode *dir, struct dentry *dentry, int mode, 643 struct nameidata *nd) 644 { 645 dout("create in dir %p dentry %p name '%.*s'\n", 646 dir, dentry, dentry->d_name.len, dentry->d_name.name); 647 648 if (ceph_snap(dir) != CEPH_NOSNAP) 649 return -EROFS; 650 651 if (nd) { 652 BUG_ON((nd->flags & LOOKUP_OPEN) == 0); 653 dentry = ceph_lookup_open(dir, dentry, nd, mode, 0); 654 /* hrm, what should i do here if we get aliased? */ 655 if (IS_ERR(dentry)) 656 return PTR_ERR(dentry); 657 return 0; 658 } 659 660 /* fall back to mknod */ 661 return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0); 662 } 663 664 static int ceph_symlink(struct inode *dir, struct dentry *dentry, 665 const char *dest) 666 { 667 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 668 struct ceph_mds_client *mdsc = &client->mdsc; 669 struct ceph_mds_request *req; 670 int err; 671 672 if (ceph_snap(dir) != CEPH_NOSNAP) 673 return -EROFS; 674 675 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest); 676 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS); 677 if (IS_ERR(req)) { 678 d_drop(dentry); 679 return PTR_ERR(req); 680 } 681 req->r_dentry = dget(dentry); 682 req->r_num_caps = 2; 683 req->r_path2 = kstrdup(dest, GFP_NOFS); 684 req->r_locked_dir = dir; 685 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 686 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 687 err = ceph_mdsc_do_request(mdsc, dir, req); 688 if (!err && !req->r_reply_info.head->is_dentry) 689 err = ceph_handle_notrace_create(dir, dentry); 690 ceph_mdsc_put_request(req); 691 if (err) 692 d_drop(dentry); 693 return err; 694 } 695 696 static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode) 697 { 698 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 699 struct ceph_mds_client *mdsc = &client->mdsc; 700 struct ceph_mds_request *req; 701 int err = -EROFS; 702 int op; 703 704 if (ceph_snap(dir) == CEPH_SNAPDIR) { 705 /* mkdir .snap/foo is a MKSNAP */ 706 op = CEPH_MDS_OP_MKSNAP; 707 dout("mksnap dir %p snap '%.*s' dn %p\n", dir, 708 dentry->d_name.len, dentry->d_name.name, dentry); 709 } else if (ceph_snap(dir) == CEPH_NOSNAP) { 710 dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode); 711 op = CEPH_MDS_OP_MKDIR; 712 } else { 713 goto out; 714 } 715 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 716 if (IS_ERR(req)) { 717 err = PTR_ERR(req); 718 goto out; 719 } 720 721 req->r_dentry = dget(dentry); 722 req->r_num_caps = 2; 723 req->r_locked_dir = dir; 724 req->r_args.mkdir.mode = cpu_to_le32(mode); 725 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 726 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 727 err = ceph_mdsc_do_request(mdsc, dir, req); 728 if (!err && !req->r_reply_info.head->is_dentry) 729 err = ceph_handle_notrace_create(dir, dentry); 730 ceph_mdsc_put_request(req); 731 out: 732 if (err < 0) 733 d_drop(dentry); 734 return err; 735 } 736 737 static int ceph_link(struct dentry *old_dentry, struct inode *dir, 738 struct dentry *dentry) 739 { 740 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 741 struct ceph_mds_client *mdsc = &client->mdsc; 742 struct ceph_mds_request *req; 743 int err; 744 745 if (ceph_snap(dir) != CEPH_NOSNAP) 746 return -EROFS; 747 748 dout("link in dir %p old_dentry %p dentry %p\n", dir, 749 old_dentry, dentry); 750 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS); 751 if (IS_ERR(req)) { 752 d_drop(dentry); 753 return PTR_ERR(req); 754 } 755 req->r_dentry = dget(dentry); 756 req->r_num_caps = 2; 757 req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */ 758 req->r_locked_dir = dir; 759 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 760 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 761 err = ceph_mdsc_do_request(mdsc, dir, req); 762 if (err) 763 d_drop(dentry); 764 else if (!req->r_reply_info.head->is_dentry) 765 d_instantiate(dentry, igrab(old_dentry->d_inode)); 766 ceph_mdsc_put_request(req); 767 return err; 768 } 769 770 /* 771 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it 772 * looks like the link count will hit 0, drop any other caps (other 773 * than PIN) we don't specifically want (due to the file still being 774 * open). 775 */ 776 static int drop_caps_for_unlink(struct inode *inode) 777 { 778 struct ceph_inode_info *ci = ceph_inode(inode); 779 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; 780 781 spin_lock(&inode->i_lock); 782 if (inode->i_nlink == 1) { 783 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); 784 ci->i_ceph_flags |= CEPH_I_NODELAY; 785 } 786 spin_unlock(&inode->i_lock); 787 return drop; 788 } 789 790 /* 791 * rmdir and unlink are differ only by the metadata op code 792 */ 793 static int ceph_unlink(struct inode *dir, struct dentry *dentry) 794 { 795 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 796 struct ceph_mds_client *mdsc = &client->mdsc; 797 struct inode *inode = dentry->d_inode; 798 struct ceph_mds_request *req; 799 int err = -EROFS; 800 int op; 801 802 if (ceph_snap(dir) == CEPH_SNAPDIR) { 803 /* rmdir .snap/foo is RMSNAP */ 804 dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len, 805 dentry->d_name.name, dentry); 806 op = CEPH_MDS_OP_RMSNAP; 807 } else if (ceph_snap(dir) == CEPH_NOSNAP) { 808 dout("unlink/rmdir dir %p dn %p inode %p\n", 809 dir, dentry, inode); 810 op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ? 811 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK; 812 } else 813 goto out; 814 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 815 if (IS_ERR(req)) { 816 err = PTR_ERR(req); 817 goto out; 818 } 819 req->r_dentry = dget(dentry); 820 req->r_num_caps = 2; 821 req->r_locked_dir = dir; 822 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 823 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 824 req->r_inode_drop = drop_caps_for_unlink(inode); 825 err = ceph_mdsc_do_request(mdsc, dir, req); 826 if (!err && !req->r_reply_info.head->is_dentry) 827 d_delete(dentry); 828 ceph_mdsc_put_request(req); 829 out: 830 return err; 831 } 832 833 static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, 834 struct inode *new_dir, struct dentry *new_dentry) 835 { 836 struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb); 837 struct ceph_mds_client *mdsc = &client->mdsc; 838 struct ceph_mds_request *req; 839 int err; 840 841 if (ceph_snap(old_dir) != ceph_snap(new_dir)) 842 return -EXDEV; 843 if (ceph_snap(old_dir) != CEPH_NOSNAP || 844 ceph_snap(new_dir) != CEPH_NOSNAP) 845 return -EROFS; 846 dout("rename dir %p dentry %p to dir %p dentry %p\n", 847 old_dir, old_dentry, new_dir, new_dentry); 848 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS); 849 if (IS_ERR(req)) 850 return PTR_ERR(req); 851 req->r_dentry = dget(new_dentry); 852 req->r_num_caps = 2; 853 req->r_old_dentry = dget(old_dentry); 854 req->r_locked_dir = new_dir; 855 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED; 856 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL; 857 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 858 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 859 /* release LINK_RDCACHE on source inode (mds will lock it) */ 860 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED; 861 if (new_dentry->d_inode) 862 req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode); 863 err = ceph_mdsc_do_request(mdsc, old_dir, req); 864 if (!err && !req->r_reply_info.head->is_dentry) { 865 /* 866 * Normally d_move() is done by fill_trace (called by 867 * do_request, above). If there is no trace, we need 868 * to do it here. 869 */ 870 d_move(old_dentry, new_dentry); 871 } 872 ceph_mdsc_put_request(req); 873 return err; 874 } 875 876 877 /* 878 * Check if dentry lease is valid. If not, delete the lease. Try to 879 * renew if the least is more than half up. 880 */ 881 static int dentry_lease_is_valid(struct dentry *dentry) 882 { 883 struct ceph_dentry_info *di; 884 struct ceph_mds_session *s; 885 int valid = 0; 886 u32 gen; 887 unsigned long ttl; 888 struct ceph_mds_session *session = NULL; 889 struct inode *dir = NULL; 890 u32 seq = 0; 891 892 spin_lock(&dentry->d_lock); 893 di = ceph_dentry(dentry); 894 if (di && di->lease_session) { 895 s = di->lease_session; 896 spin_lock(&s->s_cap_lock); 897 gen = s->s_cap_gen; 898 ttl = s->s_cap_ttl; 899 spin_unlock(&s->s_cap_lock); 900 901 if (di->lease_gen == gen && 902 time_before(jiffies, dentry->d_time) && 903 time_before(jiffies, ttl)) { 904 valid = 1; 905 if (di->lease_renew_after && 906 time_after(jiffies, di->lease_renew_after)) { 907 /* we should renew */ 908 dir = dentry->d_parent->d_inode; 909 session = ceph_get_mds_session(s); 910 seq = di->lease_seq; 911 di->lease_renew_after = 0; 912 di->lease_renew_from = jiffies; 913 } 914 } else { 915 __ceph_mdsc_drop_dentry_lease(dentry); 916 } 917 } 918 spin_unlock(&dentry->d_lock); 919 920 if (session) { 921 ceph_mdsc_lease_send_msg(session, dir, dentry, 922 CEPH_MDS_LEASE_RENEW, seq); 923 ceph_put_mds_session(session); 924 } 925 dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid); 926 return valid; 927 } 928 929 /* 930 * Check if directory-wide content lease/cap is valid. 931 */ 932 static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry) 933 { 934 struct ceph_inode_info *ci = ceph_inode(dir); 935 struct ceph_dentry_info *di = ceph_dentry(dentry); 936 int valid = 0; 937 938 spin_lock(&dir->i_lock); 939 if (ci->i_shared_gen == di->lease_shared_gen) 940 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1); 941 spin_unlock(&dir->i_lock); 942 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n", 943 dir, (unsigned)ci->i_shared_gen, dentry, 944 (unsigned)di->lease_shared_gen, valid); 945 return valid; 946 } 947 948 /* 949 * Check if cached dentry can be trusted. 950 */ 951 static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd) 952 { 953 struct inode *dir = dentry->d_parent->d_inode; 954 955 dout("d_revalidate %p '%.*s' inode %p\n", dentry, 956 dentry->d_name.len, dentry->d_name.name, dentry->d_inode); 957 958 /* always trust cached snapped dentries, snapdir dentry */ 959 if (ceph_snap(dir) != CEPH_NOSNAP) { 960 dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry, 961 dentry->d_name.len, dentry->d_name.name, dentry->d_inode); 962 goto out_touch; 963 } 964 if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) 965 goto out_touch; 966 967 if (dentry_lease_is_valid(dentry) || 968 dir_lease_is_valid(dir, dentry)) 969 goto out_touch; 970 971 dout("d_revalidate %p invalid\n", dentry); 972 d_drop(dentry); 973 return 0; 974 out_touch: 975 ceph_dentry_lru_touch(dentry); 976 return 1; 977 } 978 979 /* 980 * When a dentry is released, clear the dir I_COMPLETE if it was part 981 * of the current dir gen. 982 */ 983 static void ceph_dentry_release(struct dentry *dentry) 984 { 985 struct ceph_dentry_info *di = ceph_dentry(dentry); 986 struct inode *parent_inode = dentry->d_parent->d_inode; 987 988 if (parent_inode) { 989 struct ceph_inode_info *ci = ceph_inode(parent_inode); 990 991 spin_lock(&parent_inode->i_lock); 992 if (ci->i_shared_gen == di->lease_shared_gen) { 993 dout(" clearing %p complete (d_release)\n", 994 parent_inode); 995 ci->i_ceph_flags &= ~CEPH_I_COMPLETE; 996 ci->i_release_count++; 997 } 998 spin_unlock(&parent_inode->i_lock); 999 } 1000 if (di) { 1001 ceph_dentry_lru_del(dentry); 1002 if (di->lease_session) 1003 ceph_put_mds_session(di->lease_session); 1004 kmem_cache_free(ceph_dentry_cachep, di); 1005 dentry->d_fsdata = NULL; 1006 } 1007 } 1008 1009 static int ceph_snapdir_d_revalidate(struct dentry *dentry, 1010 struct nameidata *nd) 1011 { 1012 /* 1013 * Eventually, we'll want to revalidate snapped metadata 1014 * too... probably... 1015 */ 1016 return 1; 1017 } 1018 1019 1020 1021 /* 1022 * read() on a dir. This weird interface hack only works if mounted 1023 * with '-o dirstat'. 1024 */ 1025 static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, 1026 loff_t *ppos) 1027 { 1028 struct ceph_file_info *cf = file->private_data; 1029 struct inode *inode = file->f_dentry->d_inode; 1030 struct ceph_inode_info *ci = ceph_inode(inode); 1031 int left; 1032 1033 if (!ceph_test_opt(ceph_client(inode->i_sb), DIRSTAT)) 1034 return -EISDIR; 1035 1036 if (!cf->dir_info) { 1037 cf->dir_info = kmalloc(1024, GFP_NOFS); 1038 if (!cf->dir_info) 1039 return -ENOMEM; 1040 cf->dir_info_len = 1041 sprintf(cf->dir_info, 1042 "entries: %20lld\n" 1043 " files: %20lld\n" 1044 " subdirs: %20lld\n" 1045 "rentries: %20lld\n" 1046 " rfiles: %20lld\n" 1047 " rsubdirs: %20lld\n" 1048 "rbytes: %20lld\n" 1049 "rctime: %10ld.%09ld\n", 1050 ci->i_files + ci->i_subdirs, 1051 ci->i_files, 1052 ci->i_subdirs, 1053 ci->i_rfiles + ci->i_rsubdirs, 1054 ci->i_rfiles, 1055 ci->i_rsubdirs, 1056 ci->i_rbytes, 1057 (long)ci->i_rctime.tv_sec, 1058 (long)ci->i_rctime.tv_nsec); 1059 } 1060 1061 if (*ppos >= cf->dir_info_len) 1062 return 0; 1063 size = min_t(unsigned, size, cf->dir_info_len-*ppos); 1064 left = copy_to_user(buf, cf->dir_info + *ppos, size); 1065 if (left == size) 1066 return -EFAULT; 1067 *ppos += (size - left); 1068 return size - left; 1069 } 1070 1071 /* 1072 * an fsync() on a dir will wait for any uncommitted directory 1073 * operations to commit. 1074 */ 1075 static int ceph_dir_fsync(struct file *file, struct dentry *dentry, 1076 int datasync) 1077 { 1078 struct inode *inode = dentry->d_inode; 1079 struct ceph_inode_info *ci = ceph_inode(inode); 1080 struct list_head *head = &ci->i_unsafe_dirops; 1081 struct ceph_mds_request *req; 1082 u64 last_tid; 1083 int ret = 0; 1084 1085 dout("dir_fsync %p\n", inode); 1086 spin_lock(&ci->i_unsafe_lock); 1087 if (list_empty(head)) 1088 goto out; 1089 1090 req = list_entry(head->prev, 1091 struct ceph_mds_request, r_unsafe_dir_item); 1092 last_tid = req->r_tid; 1093 1094 do { 1095 ceph_mdsc_get_request(req); 1096 spin_unlock(&ci->i_unsafe_lock); 1097 dout("dir_fsync %p wait on tid %llu (until %llu)\n", 1098 inode, req->r_tid, last_tid); 1099 if (req->r_timeout) { 1100 ret = wait_for_completion_timeout( 1101 &req->r_safe_completion, req->r_timeout); 1102 if (ret > 0) 1103 ret = 0; 1104 else if (ret == 0) 1105 ret = -EIO; /* timed out */ 1106 } else { 1107 wait_for_completion(&req->r_safe_completion); 1108 } 1109 spin_lock(&ci->i_unsafe_lock); 1110 ceph_mdsc_put_request(req); 1111 1112 if (ret || list_empty(head)) 1113 break; 1114 req = list_entry(head->next, 1115 struct ceph_mds_request, r_unsafe_dir_item); 1116 } while (req->r_tid < last_tid); 1117 out: 1118 spin_unlock(&ci->i_unsafe_lock); 1119 return ret; 1120 } 1121 1122 /* 1123 * We maintain a private dentry LRU. 1124 * 1125 * FIXME: this needs to be changed to a per-mds lru to be useful. 1126 */ 1127 void ceph_dentry_lru_add(struct dentry *dn) 1128 { 1129 struct ceph_dentry_info *di = ceph_dentry(dn); 1130 struct ceph_mds_client *mdsc; 1131 dout("dentry_lru_add %p %p\t%.*s\n", 1132 di, dn, dn->d_name.len, dn->d_name.name); 1133 1134 if (di) { 1135 mdsc = &ceph_client(dn->d_sb)->mdsc; 1136 spin_lock(&mdsc->dentry_lru_lock); 1137 list_add_tail(&di->lru, &mdsc->dentry_lru); 1138 mdsc->num_dentry++; 1139 spin_unlock(&mdsc->dentry_lru_lock); 1140 } 1141 } 1142 1143 void ceph_dentry_lru_touch(struct dentry *dn) 1144 { 1145 struct ceph_dentry_info *di = ceph_dentry(dn); 1146 struct ceph_mds_client *mdsc; 1147 dout("dentry_lru_touch %p %p\t%.*s\n", 1148 di, dn, dn->d_name.len, dn->d_name.name); 1149 1150 if (di) { 1151 mdsc = &ceph_client(dn->d_sb)->mdsc; 1152 spin_lock(&mdsc->dentry_lru_lock); 1153 list_move_tail(&di->lru, &mdsc->dentry_lru); 1154 spin_unlock(&mdsc->dentry_lru_lock); 1155 } 1156 } 1157 1158 void ceph_dentry_lru_del(struct dentry *dn) 1159 { 1160 struct ceph_dentry_info *di = ceph_dentry(dn); 1161 struct ceph_mds_client *mdsc; 1162 1163 dout("dentry_lru_del %p %p\t%.*s\n", 1164 di, dn, dn->d_name.len, dn->d_name.name); 1165 if (di) { 1166 mdsc = &ceph_client(dn->d_sb)->mdsc; 1167 spin_lock(&mdsc->dentry_lru_lock); 1168 list_del_init(&di->lru); 1169 mdsc->num_dentry--; 1170 spin_unlock(&mdsc->dentry_lru_lock); 1171 } 1172 } 1173 1174 const struct file_operations ceph_dir_fops = { 1175 .read = ceph_read_dir, 1176 .readdir = ceph_readdir, 1177 .llseek = ceph_dir_llseek, 1178 .open = ceph_open, 1179 .release = ceph_release, 1180 .unlocked_ioctl = ceph_ioctl, 1181 .fsync = ceph_dir_fsync, 1182 }; 1183 1184 const struct inode_operations ceph_dir_iops = { 1185 .lookup = ceph_lookup, 1186 .permission = ceph_permission, 1187 .getattr = ceph_getattr, 1188 .setattr = ceph_setattr, 1189 .setxattr = ceph_setxattr, 1190 .getxattr = ceph_getxattr, 1191 .listxattr = ceph_listxattr, 1192 .removexattr = ceph_removexattr, 1193 .mknod = ceph_mknod, 1194 .symlink = ceph_symlink, 1195 .mkdir = ceph_mkdir, 1196 .link = ceph_link, 1197 .unlink = ceph_unlink, 1198 .rmdir = ceph_unlink, 1199 .rename = ceph_rename, 1200 .create = ceph_create, 1201 }; 1202 1203 struct dentry_operations ceph_dentry_ops = { 1204 .d_revalidate = ceph_d_revalidate, 1205 .d_release = ceph_dentry_release, 1206 }; 1207 1208 struct dentry_operations ceph_snapdir_dentry_ops = { 1209 .d_revalidate = ceph_snapdir_d_revalidate, 1210 }; 1211 1212 struct dentry_operations ceph_snap_dentry_ops = { 1213 }; 1214