#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
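#if 0
/*
 * Illustrative sketch only (not compiled, and not part of the driver):
 * how frag identifiers behave.  A frag packs a (bits, value) pair into
 * a u32; frag 0/0 covers the entire hash space, and splitting it by
 * one bit yields the two children 1/0 and 1/1, exactly one of which
 * contains any given dentry hash.  The helpers are the same ones this
 * file already uses (ceph_frag_make, ceph_frag_make_child,
 * ceph_frag_contains_value); the hash value below is made up.
 */
static void frag_tree_example(void)
{
	u32 root = ceph_frag_make(0, 0);	      /* covers everything */
	u32 left = ceph_frag_make_child(root, 1, 0);  /* hashes w/ top bit 0 */
	u32 right = ceph_frag_make_child(root, 1, 1); /* hashes w/ top bit 1 */
	u32 hash = 0x80001234;			      /* arbitrary dentry hash */

	BUG_ON(!ceph_frag_contains_value(root, hash));
	BUG_ON(ceph_frag_contains_value(left, hash));  /* top bit is set */
	BUG_ON(!ceph_frag_contains_value(right, hash));
}
#endif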
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}


/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
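/*
 * Worked example with made-up values: suppose i_size = 100 and
 * i_truncate_seq = 3 locally.  Then, per ceph_fill_file_size() below:
 *
 *   MDS reports size=200, truncate_seq=3 -> grow to 200 (same truncate
 *                                           epoch; size only moves up)
 *   MDS reports size=50,  truncate_seq=3 -> ignored (stale shrink)
 *   MDS reports size=50,  truncate_seq=4 -> accepted; a vmtruncate is
 *                                           queued if the file is open,
 *                                           mmapped, or caps are wanted
 *                                           (a real truncate bumped
 *                                           the seq)
 */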
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				       CEPH_CAP_FILE_EXCL|
				       CEPH_CAP_FILE_LAZYIO)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
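/*
 * Note on the seq comparisons above: ceph_seq_cmp() compares sequence
 * numbers with signed 32-bit arithmetic, so ordering survives u32
 * wraparound.  E.g. (illustrative values) with ours = 0xffffffff, an
 * MDS value of 0x1 still compares as newer, since the signed
 * difference 1 - (-1) = 2 is positive.
 */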
%u\n", 546 inode, time_warp_seq, ci->i_time_warp_seq); 547 } 548 549 /* 550 * Populate an inode based on info from mds. May be called on new or 551 * existing inodes. 552 */ 553 static int fill_inode(struct inode *inode, 554 struct ceph_mds_reply_info_in *iinfo, 555 struct ceph_mds_reply_dirfrag *dirinfo, 556 struct ceph_mds_session *session, 557 unsigned long ttl_from, int cap_fmode, 558 struct ceph_cap_reservation *caps_reservation) 559 { 560 struct ceph_mds_reply_inode *info = iinfo->in; 561 struct ceph_inode_info *ci = ceph_inode(inode); 562 int i; 563 int issued = 0, implemented; 564 int updating_inode = 0; 565 struct timespec mtime, atime, ctime; 566 u32 nsplits; 567 struct ceph_buffer *xattr_blob = NULL; 568 int err = 0; 569 int queue_trunc = 0; 570 571 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n", 572 inode, ceph_vinop(inode), le64_to_cpu(info->version), 573 ci->i_version); 574 575 /* 576 * prealloc xattr data, if it looks like we'll need it. only 577 * if len > 4 (meaning there are actually xattrs; the first 4 578 * bytes are the xattr count). 579 */ 580 if (iinfo->xattr_len > 4) { 581 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS); 582 if (!xattr_blob) 583 pr_err("fill_inode ENOMEM xattr blob %d bytes\n", 584 iinfo->xattr_len); 585 } 586 587 spin_lock(&inode->i_lock); 588 589 /* 590 * provided version will be odd if inode value is projected, 591 * even if stable. skip the update if we have newer stable 592 * info (ours>=theirs, e.g. due to racing mds replies), unless 593 * we are getting projected (unstable) info (in which case the 594 * version is odd, and we want ours>theirs). 595 * us them 596 * 2 2 skip 597 * 3 2 skip 598 * 3 3 update 599 */ 600 if (le64_to_cpu(info->version) > 0 && 601 (ci->i_version & ~1) >= le64_to_cpu(info->version)) 602 goto no_change; 603 604 updating_inode = 1; 605 issued = __ceph_caps_issued(ci, &implemented); 606 issued |= implemented | __ceph_caps_dirty(ci); 607 608 /* update inode */ 609 ci->i_version = le64_to_cpu(info->version); 610 inode->i_version++; 611 inode->i_rdev = le32_to_cpu(info->rdev); 612 613 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) { 614 inode->i_mode = le32_to_cpu(info->mode); 615 inode->i_uid = le32_to_cpu(info->uid); 616 inode->i_gid = le32_to_cpu(info->gid); 617 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 618 inode->i_uid, inode->i_gid); 619 } 620 621 if ((issued & CEPH_CAP_LINK_EXCL) == 0) 622 inode->i_nlink = le32_to_cpu(info->nlink); 623 624 /* be careful with mtime, atime, size */ 625 ceph_decode_timespec(&atime, &info->atime); 626 ceph_decode_timespec(&mtime, &info->mtime); 627 ceph_decode_timespec(&ctime, &info->ctime); 628 queue_trunc = ceph_fill_file_size(inode, issued, 629 le32_to_cpu(info->truncate_seq), 630 le64_to_cpu(info->truncate_size), 631 le64_to_cpu(info->size)); 632 ceph_fill_file_time(inode, issued, 633 le32_to_cpu(info->time_warp_seq), 634 &ctime, &mtime, &atime); 635 636 /* only update max_size on auth cap */ 637 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) && 638 ci->i_max_size != le64_to_cpu(info->max_size)) { 639 dout("max_size %lld -> %llu\n", ci->i_max_size, 640 le64_to_cpu(info->max_size)); 641 ci->i_max_size = le64_to_cpu(info->max_size); 642 } 643 644 ci->i_layout = info->layout; 645 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 646 647 /* xattrs */ 648 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. 
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			int symlen = iinfo->symlink_len;
			char *sym;

			BUG_ON(symlen != inode->i_size);
			spin_unlock(&inode->i_lock);

			err = -ENOMEM;
			sym = kmalloc(symlen+1, GFP_NOFS);
			if (!sym)
				goto out;
			memcpy(sym, iinfo->symlink, symlen);
			sym[symlen] = 0;

			spin_lock(&inode->i_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&inode->i_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&inode->i_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&inode->i_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    updating_inode &&                 /* didn't jump to no_change */
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		dout(" marking %p complete (empty)\n", inode);
		/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
		ci->i_max_offset = 2;
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 *
 * Always called under directory's i_mutex.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dir->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	spin_lock(&inode->i_lock);
	if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
		spin_unlock(&inode->i_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}
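/*
 * Worked examples for the two helpers above (illustrative numbers):
 *
 * update_dentry_lease(): a lease of duration_ms = 30000 on a kernel
 * with HZ = 250 gives ttl = from_time + 7500 jiffies and
 * half_ttl = from_time + 3750, i.e. we schedule renewal halfway
 * through the lease.
 *
 * ceph_set_dentry_offset(): offsets 0 and 1 belong to "." and "..",
 * which is why a freshly-completed directory starts i_max_offset at 2
 * (see fill_inode() above); dentries spliced in afterwards get
 * offsets 2, 3, 4, ... in d_subdirs order, which is the order
 * dcache_readdir() will replay.
 */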
/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, dn->d_count,
		     realdn, realdn->d_count,
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when holding
			 * I_COMPLETE.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			ihold(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			ihold(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
		ihold(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0) {
				dput(dn);
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_delete(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn))
				dn = NULL;
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			goto next_item;
		}
		if (dn)
			update_dentry_lease(dn, rinfo->dir_dlease[i],
					    req->r_session,
					    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}
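/*
 * Note on the offsets assigned above: ceph_make_fpos(frag, off) (see
 * super.h) packs the dirfrag into the upper half of the resulting
 * file position and the index within that frag into the lower half,
 * so a single readdir position both identifies the fragment to resume
 * in and where within it; r_readdir_offset accounts for entries of
 * this frag already returned by earlier replies.
 */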
int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&inode->i_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&inode->i_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
	}
}

/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int skip_page =
				(PageDirty(page) || PageWriteback(page));

			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}


/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We also truncate in a separate thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
		ihold(inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
	}
}

/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, wake = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&inode->i_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&inode->i_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&inode->i_lock);
	ci->i_truncate_pending--;
	if (ci->i_truncate_pending == 0)
		wake = 1;
	spin_unlock(&inode->i_lock);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
}


/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	__ceph_do_pending_vmtruncate(inode);

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&inode->i_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode =
				cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
"ctime only" : "ignored"); 1713 inode->i_ctime = attr->ia_ctime; 1714 if (only) { 1715 /* 1716 * if kernel wants to dirty ctime but nothing else, 1717 * we need to choose a cap to dirty under, or do 1718 * a almost-no-op setattr 1719 */ 1720 if (issued & CEPH_CAP_AUTH_EXCL) 1721 dirtied |= CEPH_CAP_AUTH_EXCL; 1722 else if (issued & CEPH_CAP_FILE_EXCL) 1723 dirtied |= CEPH_CAP_FILE_EXCL; 1724 else if (issued & CEPH_CAP_XATTR_EXCL) 1725 dirtied |= CEPH_CAP_XATTR_EXCL; 1726 else 1727 mask |= CEPH_SETATTR_CTIME; 1728 } 1729 } 1730 if (ia_valid & ATTR_FILE) 1731 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 1732 1733 if (dirtied) { 1734 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied); 1735 inode->i_ctime = CURRENT_TIME; 1736 } 1737 1738 release &= issued; 1739 spin_unlock(&inode->i_lock); 1740 1741 if (inode_dirty_flags) 1742 __mark_inode_dirty(inode, inode_dirty_flags); 1743 1744 if (mask) { 1745 req->r_inode = inode; 1746 ihold(inode); 1747 req->r_inode_drop = release; 1748 req->r_args.setattr.mask = cpu_to_le32(mask); 1749 req->r_num_caps = 1; 1750 parent_inode = ceph_get_dentry_parent_inode(dentry); 1751 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 1752 iput(parent_inode); 1753 } 1754 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 1755 ceph_cap_string(dirtied), mask); 1756 1757 ceph_mdsc_put_request(req); 1758 __ceph_do_pending_vmtruncate(inode); 1759 return err; 1760 out: 1761 spin_unlock(&inode->i_lock); 1762 ceph_mdsc_put_request(req); 1763 return err; 1764 } 1765 1766 /* 1767 * Verify that we have a lease on the given mask. If not, 1768 * do a getattr against an mds. 1769 */ 1770 int ceph_do_getattr(struct inode *inode, int mask) 1771 { 1772 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 1773 struct ceph_mds_client *mdsc = fsc->mdsc; 1774 struct ceph_mds_request *req; 1775 int err; 1776 1777 if (ceph_snap(inode) == CEPH_SNAPDIR) { 1778 dout("do_getattr inode %p SNAPDIR\n", inode); 1779 return 0; 1780 } 1781 1782 dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode); 1783 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 1784 return 0; 1785 1786 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1787 if (IS_ERR(req)) 1788 return PTR_ERR(req); 1789 req->r_inode = inode; 1790 ihold(inode); 1791 req->r_num_caps = 1; 1792 req->r_args.getattr.mask = cpu_to_le32(mask); 1793 err = ceph_mdsc_do_request(mdsc, NULL, req); 1794 ceph_mdsc_put_request(req); 1795 dout("do_getattr result=%d\n", err); 1796 return err; 1797 } 1798 1799 1800 /* 1801 * Check inode permissions. We verify we have a valid value for 1802 * the AUTH cap, then call the generic handler. 1803 */ 1804 int ceph_permission(struct inode *inode, int mask) 1805 { 1806 int err; 1807 1808 if (mask & MAY_NOT_BLOCK) 1809 return -ECHILD; 1810 1811 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1812 1813 if (!err) 1814 err = generic_permission(inode, mask); 1815 return err; 1816 } 1817 1818 /* 1819 * Get all attributes. Hopefully somedata we'll have a statlite() 1820 * and can limit the fields we require to be accurate. 
/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}
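/*
 * Note on the directory sizes reported above: with the rbytes mount
 * option, stat() on a directory reports the recursive byte count of
 * everything beneath it; otherwise it reports the entry count
 * (files + subdirs).  E.g. (made-up numbers) a directory holding
 * 3 files and 2 subdirs totalling 1 MB shows size 1048576 with
 * rbytes, and 5 without.
 */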