1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/module.h> 4 #include <linux/fs.h> 5 #include <linux/slab.h> 6 #include <linux/string.h> 7 #include <linux/uaccess.h> 8 #include <linux/kernel.h> 9 #include <linux/writeback.h> 10 #include <linux/vmalloc.h> 11 #include <linux/xattr.h> 12 #include <linux/posix_acl.h> 13 #include <linux/random.h> 14 #include <linux/sort.h> 15 16 #include "super.h" 17 #include "mds_client.h" 18 #include "cache.h" 19 #include <linux/ceph/decode.h> 20 21 /* 22 * Ceph inode operations 23 * 24 * Implement basic inode helpers (get, alloc) and inode ops (getattr, 25 * setattr, etc.), xattr helpers, and helpers for assimilating 26 * metadata returned by the MDS into our cache. 27 * 28 * Also define helpers for doing asynchronous writeback, invalidation, 29 * and truncation for the benefit of those who can't afford to block 30 * (typically because they are in the message handler path). 31 */ 32 33 static const struct inode_operations ceph_symlink_iops; 34 35 static void ceph_invalidate_work(struct work_struct *work); 36 static void ceph_writeback_work(struct work_struct *work); 37 static void ceph_vmtruncate_work(struct work_struct *work); 38 39 /* 40 * find or create an inode, given the ceph ino number 41 */ 42 static int ceph_set_ino_cb(struct inode *inode, void *data) 43 { 44 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data; 45 inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data); 46 return 0; 47 } 48 49 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) 50 { 51 struct inode *inode; 52 ino_t t = ceph_vino_to_ino(vino); 53 54 inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino); 55 if (inode == NULL) 56 return ERR_PTR(-ENOMEM); 57 if (inode->i_state & I_NEW) { 58 dout("get_inode created new inode %p %llx.%llx ino %llx\n", 59 inode, ceph_vinop(inode), (u64)inode->i_ino); 60 unlock_new_inode(inode); 61 } 62 63 dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino, 64 vino.snap, inode); 65 return inode; 66 } 67 68 /* 69 * get/constuct snapdir inode for a given directory 70 */ 71 struct inode *ceph_get_snapdir(struct inode *parent) 72 { 73 struct ceph_vino vino = { 74 .ino = ceph_ino(parent), 75 .snap = CEPH_SNAPDIR, 76 }; 77 struct inode *inode = ceph_get_inode(parent->i_sb, vino); 78 struct ceph_inode_info *ci = ceph_inode(inode); 79 80 BUG_ON(!S_ISDIR(parent->i_mode)); 81 if (IS_ERR(inode)) 82 return inode; 83 inode->i_mode = parent->i_mode; 84 inode->i_uid = parent->i_uid; 85 inode->i_gid = parent->i_gid; 86 inode->i_op = &ceph_snapdir_iops; 87 inode->i_fop = &ceph_snapdir_fops; 88 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ 89 ci->i_rbytes = 0; 90 return inode; 91 } 92 93 const struct inode_operations ceph_file_iops = { 94 .permission = ceph_permission, 95 .setattr = ceph_setattr, 96 .getattr = ceph_getattr, 97 .listxattr = ceph_listxattr, 98 .get_acl = ceph_get_acl, 99 .set_acl = ceph_set_acl, 100 }; 101 102 103 /* 104 * We use a 'frag tree' to keep track of the MDS's directory fragments 105 * for a given inode (usually there is just a single fragment). We 106 * need to know when a child frag is delegated to a new MDS, or when 107 * it is flagged as replicated, so we can direct our requests 108 * accordingly. 109 */ 110 111 /* 112 * find/create a frag in the tree 113 */ 114 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, 115 u32 f) 116 { 117 struct rb_node **p; 118 struct rb_node *parent = NULL; 119 struct ceph_inode_frag *frag; 120 int c; 121 122 p = &ci->i_fragtree.rb_node; 123 while (*p) { 124 parent = *p; 125 frag = rb_entry(parent, struct ceph_inode_frag, node); 126 c = ceph_frag_compare(f, frag->frag); 127 if (c < 0) 128 p = &(*p)->rb_left; 129 else if (c > 0) 130 p = &(*p)->rb_right; 131 else 132 return frag; 133 } 134 135 frag = kmalloc(sizeof(*frag), GFP_NOFS); 136 if (!frag) { 137 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx " 138 "frag %x\n", &ci->vfs_inode, 139 ceph_vinop(&ci->vfs_inode), f); 140 return ERR_PTR(-ENOMEM); 141 } 142 frag->frag = f; 143 frag->split_by = 0; 144 frag->mds = -1; 145 frag->ndist = 0; 146 147 rb_link_node(&frag->node, parent, p); 148 rb_insert_color(&frag->node, &ci->i_fragtree); 149 150 dout("get_or_create_frag added %llx.%llx frag %x\n", 151 ceph_vinop(&ci->vfs_inode), f); 152 return frag; 153 } 154 155 /* 156 * find a specific frag @f 157 */ 158 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f) 159 { 160 struct rb_node *n = ci->i_fragtree.rb_node; 161 162 while (n) { 163 struct ceph_inode_frag *frag = 164 rb_entry(n, struct ceph_inode_frag, node); 165 int c = ceph_frag_compare(f, frag->frag); 166 if (c < 0) 167 n = n->rb_left; 168 else if (c > 0) 169 n = n->rb_right; 170 else 171 return frag; 172 } 173 return NULL; 174 } 175 176 /* 177 * Choose frag containing the given value @v. If @pfrag is 178 * specified, copy the frag delegation info to the caller if 179 * it is present. 180 */ 181 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v, 182 struct ceph_inode_frag *pfrag, int *found) 183 { 184 u32 t = ceph_frag_make(0, 0); 185 struct ceph_inode_frag *frag; 186 unsigned nway, i; 187 u32 n; 188 189 if (found) 190 *found = 0; 191 192 while (1) { 193 WARN_ON(!ceph_frag_contains_value(t, v)); 194 frag = __ceph_find_frag(ci, t); 195 if (!frag) 196 break; /* t is a leaf */ 197 if (frag->split_by == 0) { 198 if (pfrag) 199 memcpy(pfrag, frag, sizeof(*pfrag)); 200 if (found) 201 *found = 1; 202 break; 203 } 204 205 /* choose child */ 206 nway = 1 << frag->split_by; 207 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t, 208 frag->split_by, nway); 209 for (i = 0; i < nway; i++) { 210 n = ceph_frag_make_child(t, frag->split_by, i); 211 if (ceph_frag_contains_value(n, v)) { 212 t = n; 213 break; 214 } 215 } 216 BUG_ON(i == nway); 217 } 218 dout("choose_frag(%x) = %x\n", v, t); 219 220 return t; 221 } 222 223 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, 224 struct ceph_inode_frag *pfrag, int *found) 225 { 226 u32 ret; 227 mutex_lock(&ci->i_fragtree_mutex); 228 ret = __ceph_choose_frag(ci, v, pfrag, found); 229 mutex_unlock(&ci->i_fragtree_mutex); 230 return ret; 231 } 232 233 /* 234 * Process dirfrag (delegation) info from the mds. Include leaf 235 * fragment in tree ONLY if ndist > 0. Otherwise, only 236 * branches/splits are included in i_fragtree) 237 */ 238 static int ceph_fill_dirfrag(struct inode *inode, 239 struct ceph_mds_reply_dirfrag *dirinfo) 240 { 241 struct ceph_inode_info *ci = ceph_inode(inode); 242 struct ceph_inode_frag *frag; 243 u32 id = le32_to_cpu(dirinfo->frag); 244 int mds = le32_to_cpu(dirinfo->auth); 245 int ndist = le32_to_cpu(dirinfo->ndist); 246 int diri_auth = -1; 247 int i; 248 int err = 0; 249 250 spin_lock(&ci->i_ceph_lock); 251 if (ci->i_auth_cap) 252 diri_auth = ci->i_auth_cap->mds; 253 spin_unlock(&ci->i_ceph_lock); 254 255 if (mds == -1) /* CDIR_AUTH_PARENT */ 256 mds = diri_auth; 257 258 mutex_lock(&ci->i_fragtree_mutex); 259 if (ndist == 0 && mds == diri_auth) { 260 /* no delegation info needed. */ 261 frag = __ceph_find_frag(ci, id); 262 if (!frag) 263 goto out; 264 if (frag->split_by == 0) { 265 /* tree leaf, remove */ 266 dout("fill_dirfrag removed %llx.%llx frag %x" 267 " (no ref)\n", ceph_vinop(inode), id); 268 rb_erase(&frag->node, &ci->i_fragtree); 269 kfree(frag); 270 } else { 271 /* tree branch, keep and clear */ 272 dout("fill_dirfrag cleared %llx.%llx frag %x" 273 " referral\n", ceph_vinop(inode), id); 274 frag->mds = -1; 275 frag->ndist = 0; 276 } 277 goto out; 278 } 279 280 281 /* find/add this frag to store mds delegation info */ 282 frag = __get_or_create_frag(ci, id); 283 if (IS_ERR(frag)) { 284 /* this is not the end of the world; we can continue 285 with bad/inaccurate delegation info */ 286 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n", 287 ceph_vinop(inode), le32_to_cpu(dirinfo->frag)); 288 err = -ENOMEM; 289 goto out; 290 } 291 292 frag->mds = mds; 293 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP); 294 for (i = 0; i < frag->ndist; i++) 295 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]); 296 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n", 297 ceph_vinop(inode), frag->frag, frag->ndist); 298 299 out: 300 mutex_unlock(&ci->i_fragtree_mutex); 301 return err; 302 } 303 304 static int frag_tree_split_cmp(const void *l, const void *r) 305 { 306 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; 307 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r; 308 return ceph_frag_compare(ls->frag, rs->frag); 309 } 310 311 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag) 312 { 313 if (!frag) 314 return f == ceph_frag_make(0, 0); 315 if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by) 316 return false; 317 return ceph_frag_contains_value(frag->frag, ceph_frag_value(f)); 318 } 319 320 static int ceph_fill_fragtree(struct inode *inode, 321 struct ceph_frag_tree_head *fragtree, 322 struct ceph_mds_reply_dirfrag *dirinfo) 323 { 324 struct ceph_inode_info *ci = ceph_inode(inode); 325 struct ceph_inode_frag *frag, *prev_frag = NULL; 326 struct rb_node *rb_node; 327 unsigned i, split_by, nsplits; 328 u32 id; 329 bool update = false; 330 331 mutex_lock(&ci->i_fragtree_mutex); 332 nsplits = le32_to_cpu(fragtree->nsplits); 333 if (nsplits != ci->i_fragtree_nsplits) { 334 update = true; 335 } else if (nsplits) { 336 i = prandom_u32() % nsplits; 337 id = le32_to_cpu(fragtree->splits[i].frag); 338 if (!__ceph_find_frag(ci, id)) 339 update = true; 340 } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) { 341 rb_node = rb_first(&ci->i_fragtree); 342 frag = rb_entry(rb_node, struct ceph_inode_frag, node); 343 if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node)) 344 update = true; 345 } 346 if (!update && dirinfo) { 347 id = le32_to_cpu(dirinfo->frag); 348 if (id != __ceph_choose_frag(ci, id, NULL, NULL)) 349 update = true; 350 } 351 if (!update) 352 goto out_unlock; 353 354 if (nsplits > 1) { 355 sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]), 356 frag_tree_split_cmp, NULL); 357 } 358 359 dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode)); 360 rb_node = rb_first(&ci->i_fragtree); 361 for (i = 0; i < nsplits; i++) { 362 id = le32_to_cpu(fragtree->splits[i].frag); 363 split_by = le32_to_cpu(fragtree->splits[i].by); 364 if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) { 365 pr_err("fill_fragtree %llx.%llx invalid split %d/%u, " 366 "frag %x split by %d\n", ceph_vinop(inode), 367 i, nsplits, id, split_by); 368 continue; 369 } 370 frag = NULL; 371 while (rb_node) { 372 frag = rb_entry(rb_node, struct ceph_inode_frag, node); 373 if (ceph_frag_compare(frag->frag, id) >= 0) { 374 if (frag->frag != id) 375 frag = NULL; 376 else 377 rb_node = rb_next(rb_node); 378 break; 379 } 380 rb_node = rb_next(rb_node); 381 /* delete stale split/leaf node */ 382 if (frag->split_by > 0 || 383 !is_frag_child(frag->frag, prev_frag)) { 384 rb_erase(&frag->node, &ci->i_fragtree); 385 if (frag->split_by > 0) 386 ci->i_fragtree_nsplits--; 387 kfree(frag); 388 } 389 frag = NULL; 390 } 391 if (!frag) { 392 frag = __get_or_create_frag(ci, id); 393 if (IS_ERR(frag)) 394 continue; 395 } 396 if (frag->split_by == 0) 397 ci->i_fragtree_nsplits++; 398 frag->split_by = split_by; 399 dout(" frag %x split by %d\n", frag->frag, frag->split_by); 400 prev_frag = frag; 401 } 402 while (rb_node) { 403 frag = rb_entry(rb_node, struct ceph_inode_frag, node); 404 rb_node = rb_next(rb_node); 405 /* delete stale split/leaf node */ 406 if (frag->split_by > 0 || 407 !is_frag_child(frag->frag, prev_frag)) { 408 rb_erase(&frag->node, &ci->i_fragtree); 409 if (frag->split_by > 0) 410 ci->i_fragtree_nsplits--; 411 kfree(frag); 412 } 413 } 414 out_unlock: 415 mutex_unlock(&ci->i_fragtree_mutex); 416 return 0; 417 } 418 419 /* 420 * initialize a newly allocated inode. 421 */ 422 struct inode *ceph_alloc_inode(struct super_block *sb) 423 { 424 struct ceph_inode_info *ci; 425 int i; 426 427 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS); 428 if (!ci) 429 return NULL; 430 431 dout("alloc_inode %p\n", &ci->vfs_inode); 432 433 spin_lock_init(&ci->i_ceph_lock); 434 435 ci->i_version = 0; 436 ci->i_inline_version = 0; 437 ci->i_time_warp_seq = 0; 438 ci->i_ceph_flags = 0; 439 atomic64_set(&ci->i_ordered_count, 1); 440 atomic64_set(&ci->i_release_count, 1); 441 atomic64_set(&ci->i_complete_seq[0], 0); 442 atomic64_set(&ci->i_complete_seq[1], 0); 443 ci->i_symlink = NULL; 444 445 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); 446 RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL); 447 448 ci->i_fragtree = RB_ROOT; 449 mutex_init(&ci->i_fragtree_mutex); 450 451 ci->i_xattrs.blob = NULL; 452 ci->i_xattrs.prealloc_blob = NULL; 453 ci->i_xattrs.dirty = false; 454 ci->i_xattrs.index = RB_ROOT; 455 ci->i_xattrs.count = 0; 456 ci->i_xattrs.names_size = 0; 457 ci->i_xattrs.vals_size = 0; 458 ci->i_xattrs.version = 0; 459 ci->i_xattrs.index_version = 0; 460 461 ci->i_caps = RB_ROOT; 462 ci->i_auth_cap = NULL; 463 ci->i_dirty_caps = 0; 464 ci->i_flushing_caps = 0; 465 INIT_LIST_HEAD(&ci->i_dirty_item); 466 INIT_LIST_HEAD(&ci->i_flushing_item); 467 ci->i_prealloc_cap_flush = NULL; 468 INIT_LIST_HEAD(&ci->i_cap_flush_list); 469 init_waitqueue_head(&ci->i_cap_wq); 470 ci->i_hold_caps_min = 0; 471 ci->i_hold_caps_max = 0; 472 INIT_LIST_HEAD(&ci->i_cap_delay_list); 473 INIT_LIST_HEAD(&ci->i_cap_snaps); 474 ci->i_head_snapc = NULL; 475 ci->i_snap_caps = 0; 476 477 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) 478 ci->i_nr_by_mode[i] = 0; 479 480 mutex_init(&ci->i_truncate_mutex); 481 ci->i_truncate_seq = 0; 482 ci->i_truncate_size = 0; 483 ci->i_truncate_pending = 0; 484 485 ci->i_max_size = 0; 486 ci->i_reported_size = 0; 487 ci->i_wanted_max_size = 0; 488 ci->i_requested_max_size = 0; 489 490 ci->i_pin_ref = 0; 491 ci->i_rd_ref = 0; 492 ci->i_rdcache_ref = 0; 493 ci->i_wr_ref = 0; 494 ci->i_wb_ref = 0; 495 ci->i_wrbuffer_ref = 0; 496 ci->i_wrbuffer_ref_head = 0; 497 ci->i_shared_gen = 0; 498 ci->i_rdcache_gen = 0; 499 ci->i_rdcache_revoking = 0; 500 501 INIT_LIST_HEAD(&ci->i_unsafe_writes); 502 INIT_LIST_HEAD(&ci->i_unsafe_dirops); 503 INIT_LIST_HEAD(&ci->i_unsafe_iops); 504 spin_lock_init(&ci->i_unsafe_lock); 505 506 ci->i_snap_realm = NULL; 507 INIT_LIST_HEAD(&ci->i_snap_realm_item); 508 INIT_LIST_HEAD(&ci->i_snap_flush_item); 509 510 INIT_WORK(&ci->i_wb_work, ceph_writeback_work); 511 INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work); 512 513 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); 514 515 ceph_fscache_inode_init(ci); 516 517 return &ci->vfs_inode; 518 } 519 520 static void ceph_i_callback(struct rcu_head *head) 521 { 522 struct inode *inode = container_of(head, struct inode, i_rcu); 523 struct ceph_inode_info *ci = ceph_inode(inode); 524 525 kmem_cache_free(ceph_inode_cachep, ci); 526 } 527 528 void ceph_destroy_inode(struct inode *inode) 529 { 530 struct ceph_inode_info *ci = ceph_inode(inode); 531 struct ceph_inode_frag *frag; 532 struct rb_node *n; 533 534 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode)); 535 536 ceph_fscache_unregister_inode_cookie(ci); 537 538 ceph_queue_caps_release(inode); 539 540 /* 541 * we may still have a snap_realm reference if there are stray 542 * caps in i_snap_caps. 543 */ 544 if (ci->i_snap_realm) { 545 struct ceph_mds_client *mdsc = 546 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; 547 struct ceph_snap_realm *realm = ci->i_snap_realm; 548 549 dout(" dropping residual ref to snap realm %p\n", realm); 550 spin_lock(&realm->inodes_with_caps_lock); 551 list_del_init(&ci->i_snap_realm_item); 552 spin_unlock(&realm->inodes_with_caps_lock); 553 ceph_put_snap_realm(mdsc, realm); 554 } 555 556 kfree(ci->i_symlink); 557 while ((n = rb_first(&ci->i_fragtree)) != NULL) { 558 frag = rb_entry(n, struct ceph_inode_frag, node); 559 rb_erase(n, &ci->i_fragtree); 560 kfree(frag); 561 } 562 ci->i_fragtree_nsplits = 0; 563 564 __ceph_destroy_xattrs(ci); 565 if (ci->i_xattrs.blob) 566 ceph_buffer_put(ci->i_xattrs.blob); 567 if (ci->i_xattrs.prealloc_blob) 568 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 569 570 ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns)); 571 572 call_rcu(&inode->i_rcu, ceph_i_callback); 573 } 574 575 int ceph_drop_inode(struct inode *inode) 576 { 577 /* 578 * Positve dentry and corresponding inode are always accompanied 579 * in MDS reply. So no need to keep inode in the cache after 580 * dropping all its aliases. 581 */ 582 return 1; 583 } 584 585 void ceph_evict_inode(struct inode *inode) 586 { 587 /* wait unsafe sync writes */ 588 ceph_sync_write_wait(inode); 589 truncate_inode_pages_final(&inode->i_data); 590 clear_inode(inode); 591 } 592 593 static inline blkcnt_t calc_inode_blocks(u64 size) 594 { 595 return (size + (1<<9) - 1) >> 9; 596 } 597 598 /* 599 * Helpers to fill in size, ctime, mtime, and atime. We have to be 600 * careful because either the client or MDS may have more up to date 601 * info, depending on which capabilities are held, and whether 602 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime 603 * and size are monotonically increasing, except when utimes() or 604 * truncate() increments the corresponding _seq values.) 605 */ 606 int ceph_fill_file_size(struct inode *inode, int issued, 607 u32 truncate_seq, u64 truncate_size, u64 size) 608 { 609 struct ceph_inode_info *ci = ceph_inode(inode); 610 int queue_trunc = 0; 611 612 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || 613 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { 614 dout("size %lld -> %llu\n", inode->i_size, size); 615 if (size > 0 && S_ISDIR(inode->i_mode)) { 616 pr_err("fill_file_size non-zero size for directory\n"); 617 size = 0; 618 } 619 i_size_write(inode, size); 620 inode->i_blocks = calc_inode_blocks(size); 621 ci->i_reported_size = size; 622 if (truncate_seq != ci->i_truncate_seq) { 623 dout("truncate_seq %u -> %u\n", 624 ci->i_truncate_seq, truncate_seq); 625 ci->i_truncate_seq = truncate_seq; 626 627 /* the MDS should have revoked these caps */ 628 WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL | 629 CEPH_CAP_FILE_RD | 630 CEPH_CAP_FILE_WR | 631 CEPH_CAP_FILE_LAZYIO)); 632 /* 633 * If we hold relevant caps, or in the case where we're 634 * not the only client referencing this file and we 635 * don't hold those caps, then we need to check whether 636 * the file is either opened or mmaped 637 */ 638 if ((issued & (CEPH_CAP_FILE_CACHE| 639 CEPH_CAP_FILE_BUFFER)) || 640 mapping_mapped(inode->i_mapping) || 641 __ceph_caps_file_wanted(ci)) { 642 ci->i_truncate_pending++; 643 queue_trunc = 1; 644 } 645 } 646 } 647 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 && 648 ci->i_truncate_size != truncate_size) { 649 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size, 650 truncate_size); 651 ci->i_truncate_size = truncate_size; 652 } 653 654 if (queue_trunc) 655 ceph_fscache_invalidate(inode); 656 657 return queue_trunc; 658 } 659 660 void ceph_fill_file_time(struct inode *inode, int issued, 661 u64 time_warp_seq, struct timespec *ctime, 662 struct timespec *mtime, struct timespec *atime) 663 { 664 struct ceph_inode_info *ci = ceph_inode(inode); 665 int warn = 0; 666 667 if (issued & (CEPH_CAP_FILE_EXCL| 668 CEPH_CAP_FILE_WR| 669 CEPH_CAP_FILE_BUFFER| 670 CEPH_CAP_AUTH_EXCL| 671 CEPH_CAP_XATTR_EXCL)) { 672 if (timespec_compare(ctime, &inode->i_ctime) > 0) { 673 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", 674 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 675 ctime->tv_sec, ctime->tv_nsec); 676 inode->i_ctime = *ctime; 677 } 678 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 679 /* the MDS did a utimes() */ 680 dout("mtime %ld.%09ld -> %ld.%09ld " 681 "tw %d -> %d\n", 682 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 683 mtime->tv_sec, mtime->tv_nsec, 684 ci->i_time_warp_seq, (int)time_warp_seq); 685 686 inode->i_mtime = *mtime; 687 inode->i_atime = *atime; 688 ci->i_time_warp_seq = time_warp_seq; 689 } else if (time_warp_seq == ci->i_time_warp_seq) { 690 /* nobody did utimes(); take the max */ 691 if (timespec_compare(mtime, &inode->i_mtime) > 0) { 692 dout("mtime %ld.%09ld -> %ld.%09ld inc\n", 693 inode->i_mtime.tv_sec, 694 inode->i_mtime.tv_nsec, 695 mtime->tv_sec, mtime->tv_nsec); 696 inode->i_mtime = *mtime; 697 } 698 if (timespec_compare(atime, &inode->i_atime) > 0) { 699 dout("atime %ld.%09ld -> %ld.%09ld inc\n", 700 inode->i_atime.tv_sec, 701 inode->i_atime.tv_nsec, 702 atime->tv_sec, atime->tv_nsec); 703 inode->i_atime = *atime; 704 } 705 } else if (issued & CEPH_CAP_FILE_EXCL) { 706 /* we did a utimes(); ignore mds values */ 707 } else { 708 warn = 1; 709 } 710 } else { 711 /* we have no write|excl caps; whatever the MDS says is true */ 712 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { 713 inode->i_ctime = *ctime; 714 inode->i_mtime = *mtime; 715 inode->i_atime = *atime; 716 ci->i_time_warp_seq = time_warp_seq; 717 } else { 718 warn = 1; 719 } 720 } 721 if (warn) /* time_warp_seq shouldn't go backwards */ 722 dout("%p mds time_warp_seq %llu < %u\n", 723 inode, time_warp_seq, ci->i_time_warp_seq); 724 } 725 726 /* 727 * Populate an inode based on info from mds. May be called on new or 728 * existing inodes. 729 */ 730 static int fill_inode(struct inode *inode, struct page *locked_page, 731 struct ceph_mds_reply_info_in *iinfo, 732 struct ceph_mds_reply_dirfrag *dirinfo, 733 struct ceph_mds_session *session, 734 unsigned long ttl_from, int cap_fmode, 735 struct ceph_cap_reservation *caps_reservation) 736 { 737 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 738 struct ceph_mds_reply_inode *info = iinfo->in; 739 struct ceph_inode_info *ci = ceph_inode(inode); 740 int issued = 0, implemented, new_issued; 741 struct timespec mtime, atime, ctime; 742 struct ceph_buffer *xattr_blob = NULL; 743 struct ceph_string *pool_ns = NULL; 744 struct ceph_cap *new_cap = NULL; 745 int err = 0; 746 bool wake = false; 747 bool queue_trunc = false; 748 bool new_version = false; 749 bool fill_inline = false; 750 751 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n", 752 inode, ceph_vinop(inode), le64_to_cpu(info->version), 753 ci->i_version); 754 755 /* prealloc new cap struct */ 756 if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP) 757 new_cap = ceph_get_cap(mdsc, caps_reservation); 758 759 /* 760 * prealloc xattr data, if it looks like we'll need it. only 761 * if len > 4 (meaning there are actually xattrs; the first 4 762 * bytes are the xattr count). 763 */ 764 if (iinfo->xattr_len > 4) { 765 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS); 766 if (!xattr_blob) 767 pr_err("fill_inode ENOMEM xattr blob %d bytes\n", 768 iinfo->xattr_len); 769 } 770 771 if (iinfo->pool_ns_len > 0) 772 pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data, 773 iinfo->pool_ns_len); 774 775 spin_lock(&ci->i_ceph_lock); 776 777 /* 778 * provided version will be odd if inode value is projected, 779 * even if stable. skip the update if we have newer stable 780 * info (ours>=theirs, e.g. due to racing mds replies), unless 781 * we are getting projected (unstable) info (in which case the 782 * version is odd, and we want ours>theirs). 783 * us them 784 * 2 2 skip 785 * 3 2 skip 786 * 3 3 update 787 */ 788 if (ci->i_version == 0 || 789 ((info->cap.flags & CEPH_CAP_FLAG_AUTH) && 790 le64_to_cpu(info->version) > (ci->i_version & ~1))) 791 new_version = true; 792 793 issued = __ceph_caps_issued(ci, &implemented); 794 issued |= implemented | __ceph_caps_dirty(ci); 795 new_issued = ~issued & le32_to_cpu(info->cap.caps); 796 797 /* update inode */ 798 ci->i_version = le64_to_cpu(info->version); 799 inode->i_version++; 800 inode->i_rdev = le32_to_cpu(info->rdev); 801 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 802 803 if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) && 804 (issued & CEPH_CAP_AUTH_EXCL) == 0) { 805 inode->i_mode = le32_to_cpu(info->mode); 806 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid)); 807 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid)); 808 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 809 from_kuid(&init_user_ns, inode->i_uid), 810 from_kgid(&init_user_ns, inode->i_gid)); 811 } 812 813 if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) && 814 (issued & CEPH_CAP_LINK_EXCL) == 0) 815 set_nlink(inode, le32_to_cpu(info->nlink)); 816 817 if (new_version || (new_issued & CEPH_CAP_ANY_RD)) { 818 /* be careful with mtime, atime, size */ 819 ceph_decode_timespec(&atime, &info->atime); 820 ceph_decode_timespec(&mtime, &info->mtime); 821 ceph_decode_timespec(&ctime, &info->ctime); 822 ceph_fill_file_time(inode, issued, 823 le32_to_cpu(info->time_warp_seq), 824 &ctime, &mtime, &atime); 825 } 826 827 if (new_version || 828 (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) { 829 s64 old_pool = ci->i_layout.pool_id; 830 struct ceph_string *old_ns; 831 832 ceph_file_layout_from_legacy(&ci->i_layout, &info->layout); 833 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns, 834 lockdep_is_held(&ci->i_ceph_lock)); 835 rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns); 836 837 if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns) 838 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; 839 840 pool_ns = old_ns; 841 842 queue_trunc = ceph_fill_file_size(inode, issued, 843 le32_to_cpu(info->truncate_seq), 844 le64_to_cpu(info->truncate_size), 845 le64_to_cpu(info->size)); 846 /* only update max_size on auth cap */ 847 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) && 848 ci->i_max_size != le64_to_cpu(info->max_size)) { 849 dout("max_size %lld -> %llu\n", ci->i_max_size, 850 le64_to_cpu(info->max_size)); 851 ci->i_max_size = le64_to_cpu(info->max_size); 852 } 853 } 854 855 /* xattrs */ 856 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */ 857 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) && 858 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { 859 if (ci->i_xattrs.blob) 860 ceph_buffer_put(ci->i_xattrs.blob); 861 ci->i_xattrs.blob = xattr_blob; 862 if (xattr_blob) 863 memcpy(ci->i_xattrs.blob->vec.iov_base, 864 iinfo->xattr_data, iinfo->xattr_len); 865 ci->i_xattrs.version = le64_to_cpu(info->xattr_version); 866 ceph_forget_all_cached_acls(inode); 867 xattr_blob = NULL; 868 } 869 870 inode->i_mapping->a_ops = &ceph_aops; 871 872 switch (inode->i_mode & S_IFMT) { 873 case S_IFIFO: 874 case S_IFBLK: 875 case S_IFCHR: 876 case S_IFSOCK: 877 init_special_inode(inode, inode->i_mode, inode->i_rdev); 878 inode->i_op = &ceph_file_iops; 879 break; 880 case S_IFREG: 881 inode->i_op = &ceph_file_iops; 882 inode->i_fop = &ceph_file_fops; 883 break; 884 case S_IFLNK: 885 inode->i_op = &ceph_symlink_iops; 886 if (!ci->i_symlink) { 887 u32 symlen = iinfo->symlink_len; 888 char *sym; 889 890 spin_unlock(&ci->i_ceph_lock); 891 892 if (symlen != i_size_read(inode)) { 893 pr_err("fill_inode %llx.%llx BAD symlink " 894 "size %lld\n", ceph_vinop(inode), 895 i_size_read(inode)); 896 i_size_write(inode, symlen); 897 inode->i_blocks = calc_inode_blocks(symlen); 898 } 899 900 err = -ENOMEM; 901 sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS); 902 if (!sym) 903 goto out; 904 905 spin_lock(&ci->i_ceph_lock); 906 if (!ci->i_symlink) 907 ci->i_symlink = sym; 908 else 909 kfree(sym); /* lost a race */ 910 } 911 inode->i_link = ci->i_symlink; 912 break; 913 case S_IFDIR: 914 inode->i_op = &ceph_dir_iops; 915 inode->i_fop = &ceph_dir_fops; 916 917 ci->i_dir_layout = iinfo->dir_layout; 918 919 ci->i_files = le64_to_cpu(info->files); 920 ci->i_subdirs = le64_to_cpu(info->subdirs); 921 ci->i_rbytes = le64_to_cpu(info->rbytes); 922 ci->i_rfiles = le64_to_cpu(info->rfiles); 923 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs); 924 ceph_decode_timespec(&ci->i_rctime, &info->rctime); 925 break; 926 default: 927 pr_err("fill_inode %llx.%llx BAD mode 0%o\n", 928 ceph_vinop(inode), inode->i_mode); 929 } 930 931 /* were we issued a capability? */ 932 if (info->cap.caps) { 933 if (ceph_snap(inode) == CEPH_NOSNAP) { 934 unsigned caps = le32_to_cpu(info->cap.caps); 935 ceph_add_cap(inode, session, 936 le64_to_cpu(info->cap.cap_id), 937 cap_fmode, caps, 938 le32_to_cpu(info->cap.wanted), 939 le32_to_cpu(info->cap.seq), 940 le32_to_cpu(info->cap.mseq), 941 le64_to_cpu(info->cap.realm), 942 info->cap.flags, &new_cap); 943 944 /* set dir completion flag? */ 945 if (S_ISDIR(inode->i_mode) && 946 ci->i_files == 0 && ci->i_subdirs == 0 && 947 (caps & CEPH_CAP_FILE_SHARED) && 948 (issued & CEPH_CAP_FILE_EXCL) == 0 && 949 !__ceph_dir_is_complete(ci)) { 950 dout(" marking %p complete (empty)\n", inode); 951 i_size_write(inode, 0); 952 __ceph_dir_set_complete(ci, 953 atomic64_read(&ci->i_release_count), 954 atomic64_read(&ci->i_ordered_count)); 955 } 956 957 wake = true; 958 } else { 959 dout(" %p got snap_caps %s\n", inode, 960 ceph_cap_string(le32_to_cpu(info->cap.caps))); 961 ci->i_snap_caps |= le32_to_cpu(info->cap.caps); 962 if (cap_fmode >= 0) 963 __ceph_get_fmode(ci, cap_fmode); 964 } 965 } else if (cap_fmode >= 0) { 966 pr_warn("mds issued no caps on %llx.%llx\n", 967 ceph_vinop(inode)); 968 __ceph_get_fmode(ci, cap_fmode); 969 } 970 971 if (iinfo->inline_version > 0 && 972 iinfo->inline_version >= ci->i_inline_version) { 973 int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 974 ci->i_inline_version = iinfo->inline_version; 975 if (ci->i_inline_version != CEPH_INLINE_NONE && 976 (locked_page || 977 (le32_to_cpu(info->cap.caps) & cache_caps))) 978 fill_inline = true; 979 } 980 981 spin_unlock(&ci->i_ceph_lock); 982 983 if (fill_inline) 984 ceph_fill_inline_data(inode, locked_page, 985 iinfo->inline_data, iinfo->inline_len); 986 987 if (wake) 988 wake_up_all(&ci->i_cap_wq); 989 990 /* queue truncate if we saw i_size decrease */ 991 if (queue_trunc) 992 ceph_queue_vmtruncate(inode); 993 994 /* populate frag tree */ 995 if (S_ISDIR(inode->i_mode)) 996 ceph_fill_fragtree(inode, &info->fragtree, dirinfo); 997 998 /* update delegation info? */ 999 if (dirinfo) 1000 ceph_fill_dirfrag(inode, dirinfo); 1001 1002 err = 0; 1003 out: 1004 if (new_cap) 1005 ceph_put_cap(mdsc, new_cap); 1006 if (xattr_blob) 1007 ceph_buffer_put(xattr_blob); 1008 ceph_put_string(pool_ns); 1009 return err; 1010 } 1011 1012 /* 1013 * caller should hold session s_mutex. 1014 */ 1015 static void update_dentry_lease(struct dentry *dentry, 1016 struct ceph_mds_reply_lease *lease, 1017 struct ceph_mds_session *session, 1018 unsigned long from_time) 1019 { 1020 struct ceph_dentry_info *di = ceph_dentry(dentry); 1021 long unsigned duration = le32_to_cpu(lease->duration_ms); 1022 long unsigned ttl = from_time + (duration * HZ) / 1000; 1023 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000; 1024 struct inode *dir; 1025 1026 spin_lock(&dentry->d_lock); 1027 dout("update_dentry_lease %p duration %lu ms ttl %lu\n", 1028 dentry, duration, ttl); 1029 1030 /* make lease_rdcache_gen match directory */ 1031 dir = d_inode(dentry->d_parent); 1032 1033 /* only track leases on regular dentries */ 1034 if (ceph_snap(dir) != CEPH_NOSNAP) 1035 goto out_unlock; 1036 1037 di->lease_shared_gen = ceph_inode(dir)->i_shared_gen; 1038 1039 if (duration == 0) 1040 goto out_unlock; 1041 1042 if (di->lease_gen == session->s_cap_gen && 1043 time_before(ttl, di->time)) 1044 goto out_unlock; /* we already have a newer lease. */ 1045 1046 if (di->lease_session && di->lease_session != session) 1047 goto out_unlock; 1048 1049 ceph_dentry_lru_touch(dentry); 1050 1051 if (!di->lease_session) 1052 di->lease_session = ceph_get_mds_session(session); 1053 di->lease_gen = session->s_cap_gen; 1054 di->lease_seq = le32_to_cpu(lease->seq); 1055 di->lease_renew_after = half_ttl; 1056 di->lease_renew_from = 0; 1057 di->time = ttl; 1058 out_unlock: 1059 spin_unlock(&dentry->d_lock); 1060 return; 1061 } 1062 1063 /* 1064 * splice a dentry to an inode. 1065 * caller must hold directory i_mutex for this to be safe. 1066 */ 1067 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in) 1068 { 1069 struct dentry *realdn; 1070 1071 BUG_ON(d_inode(dn)); 1072 1073 /* dn must be unhashed */ 1074 if (!d_unhashed(dn)) 1075 d_drop(dn); 1076 realdn = d_splice_alias(in, dn); 1077 if (IS_ERR(realdn)) { 1078 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", 1079 PTR_ERR(realdn), dn, in, ceph_vinop(in)); 1080 dn = realdn; /* note realdn contains the error */ 1081 goto out; 1082 } else if (realdn) { 1083 dout("dn %p (%d) spliced with %p (%d) " 1084 "inode %p ino %llx.%llx\n", 1085 dn, d_count(dn), 1086 realdn, d_count(realdn), 1087 d_inode(realdn), ceph_vinop(d_inode(realdn))); 1088 dput(dn); 1089 dn = realdn; 1090 } else { 1091 BUG_ON(!ceph_dentry(dn)); 1092 dout("dn %p attached to %p ino %llx.%llx\n", 1093 dn, d_inode(dn), ceph_vinop(d_inode(dn))); 1094 } 1095 out: 1096 return dn; 1097 } 1098 1099 /* 1100 * Incorporate results into the local cache. This is either just 1101 * one inode, or a directory, dentry, and possibly linked-to inode (e.g., 1102 * after a lookup). 1103 * 1104 * A reply may contain 1105 * a directory inode along with a dentry. 1106 * and/or a target inode 1107 * 1108 * Called with snap_rwsem (read). 1109 */ 1110 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, 1111 struct ceph_mds_session *session) 1112 { 1113 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1114 struct inode *in = NULL; 1115 struct ceph_vino vino; 1116 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 1117 int err = 0; 1118 1119 dout("fill_trace %p is_dentry %d is_target %d\n", req, 1120 rinfo->head->is_dentry, rinfo->head->is_target); 1121 1122 #if 0 1123 /* 1124 * Debugging hook: 1125 * 1126 * If we resend completed ops to a recovering mds, we get no 1127 * trace. Since that is very rare, pretend this is the case 1128 * to ensure the 'no trace' handlers in the callers behave. 1129 * 1130 * Fill in inodes unconditionally to avoid breaking cap 1131 * invariants. 1132 */ 1133 if (rinfo->head->op & CEPH_MDS_OP_WRITE) { 1134 pr_info("fill_trace faking empty trace on %lld %s\n", 1135 req->r_tid, ceph_mds_op_name(rinfo->head->op)); 1136 if (rinfo->head->is_dentry) { 1137 rinfo->head->is_dentry = 0; 1138 err = fill_inode(req->r_locked_dir, 1139 &rinfo->diri, rinfo->dirfrag, 1140 session, req->r_request_started, -1); 1141 } 1142 if (rinfo->head->is_target) { 1143 rinfo->head->is_target = 0; 1144 ininfo = rinfo->targeti.in; 1145 vino.ino = le64_to_cpu(ininfo->ino); 1146 vino.snap = le64_to_cpu(ininfo->snapid); 1147 in = ceph_get_inode(sb, vino); 1148 err = fill_inode(in, &rinfo->targeti, NULL, 1149 session, req->r_request_started, 1150 req->r_fmode); 1151 iput(in); 1152 } 1153 } 1154 #endif 1155 1156 if (!rinfo->head->is_target && !rinfo->head->is_dentry) { 1157 dout("fill_trace reply is empty!\n"); 1158 if (rinfo->head->result == 0 && req->r_locked_dir) 1159 ceph_invalidate_dir_request(req); 1160 return 0; 1161 } 1162 1163 if (rinfo->head->is_dentry) { 1164 struct inode *dir = req->r_locked_dir; 1165 1166 if (dir) { 1167 err = fill_inode(dir, NULL, 1168 &rinfo->diri, rinfo->dirfrag, 1169 session, req->r_request_started, -1, 1170 &req->r_caps_reservation); 1171 if (err < 0) 1172 goto done; 1173 } else { 1174 WARN_ON_ONCE(1); 1175 } 1176 1177 if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) { 1178 struct qstr dname; 1179 struct dentry *dn, *parent; 1180 1181 BUG_ON(!rinfo->head->is_target); 1182 BUG_ON(req->r_dentry); 1183 1184 parent = d_find_any_alias(dir); 1185 BUG_ON(!parent); 1186 1187 dname.name = rinfo->dname; 1188 dname.len = rinfo->dname_len; 1189 dname.hash = full_name_hash(parent, dname.name, dname.len); 1190 vino.ino = le64_to_cpu(rinfo->targeti.in->ino); 1191 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); 1192 retry_lookup: 1193 dn = d_lookup(parent, &dname); 1194 dout("d_lookup on parent=%p name=%.*s got %p\n", 1195 parent, dname.len, dname.name, dn); 1196 1197 if (!dn) { 1198 dn = d_alloc(parent, &dname); 1199 dout("d_alloc %p '%.*s' = %p\n", parent, 1200 dname.len, dname.name, dn); 1201 if (dn == NULL) { 1202 dput(parent); 1203 err = -ENOMEM; 1204 goto done; 1205 } 1206 err = 0; 1207 } else if (d_really_is_positive(dn) && 1208 (ceph_ino(d_inode(dn)) != vino.ino || 1209 ceph_snap(d_inode(dn)) != vino.snap)) { 1210 dout(" dn %p points to wrong inode %p\n", 1211 dn, d_inode(dn)); 1212 d_delete(dn); 1213 dput(dn); 1214 goto retry_lookup; 1215 } 1216 1217 req->r_dentry = dn; 1218 dput(parent); 1219 } 1220 } 1221 1222 if (rinfo->head->is_target) { 1223 vino.ino = le64_to_cpu(rinfo->targeti.in->ino); 1224 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); 1225 1226 in = ceph_get_inode(sb, vino); 1227 if (IS_ERR(in)) { 1228 err = PTR_ERR(in); 1229 goto done; 1230 } 1231 req->r_target_inode = in; 1232 1233 err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL, 1234 session, req->r_request_started, 1235 (!req->r_aborted && rinfo->head->result == 0) ? 1236 req->r_fmode : -1, 1237 &req->r_caps_reservation); 1238 if (err < 0) { 1239 pr_err("fill_inode badness %p %llx.%llx\n", 1240 in, ceph_vinop(in)); 1241 goto done; 1242 } 1243 } 1244 1245 /* 1246 * ignore null lease/binding on snapdir ENOENT, or else we 1247 * will have trouble splicing in the virtual snapdir later 1248 */ 1249 if (rinfo->head->is_dentry && !req->r_aborted && 1250 req->r_locked_dir && 1251 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, 1252 fsc->mount_options->snapdir_name, 1253 req->r_dentry->d_name.len))) { 1254 /* 1255 * lookup link rename : null -> possibly existing inode 1256 * mknod symlink mkdir : null -> new inode 1257 * unlink : linked -> null 1258 */ 1259 struct inode *dir = req->r_locked_dir; 1260 struct dentry *dn = req->r_dentry; 1261 bool have_dir_cap, have_lease; 1262 1263 BUG_ON(!dn); 1264 BUG_ON(!dir); 1265 BUG_ON(d_inode(dn->d_parent) != dir); 1266 BUG_ON(ceph_ino(dir) != 1267 le64_to_cpu(rinfo->diri.in->ino)); 1268 BUG_ON(ceph_snap(dir) != 1269 le64_to_cpu(rinfo->diri.in->snapid)); 1270 1271 /* do we have a lease on the whole dir? */ 1272 have_dir_cap = 1273 (le32_to_cpu(rinfo->diri.in->cap.caps) & 1274 CEPH_CAP_FILE_SHARED); 1275 1276 /* do we have a dn lease? */ 1277 have_lease = have_dir_cap || 1278 le32_to_cpu(rinfo->dlease->duration_ms); 1279 if (!have_lease) 1280 dout("fill_trace no dentry lease or dir cap\n"); 1281 1282 /* rename? */ 1283 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) { 1284 struct inode *olddir = req->r_old_dentry_dir; 1285 BUG_ON(!olddir); 1286 1287 dout(" src %p '%pd' dst %p '%pd'\n", 1288 req->r_old_dentry, 1289 req->r_old_dentry, 1290 dn, dn); 1291 dout("fill_trace doing d_move %p -> %p\n", 1292 req->r_old_dentry, dn); 1293 1294 /* d_move screws up sibling dentries' offsets */ 1295 ceph_dir_clear_ordered(dir); 1296 ceph_dir_clear_ordered(olddir); 1297 1298 d_move(req->r_old_dentry, dn); 1299 dout(" src %p '%pd' dst %p '%pd'\n", 1300 req->r_old_dentry, 1301 req->r_old_dentry, 1302 dn, dn); 1303 1304 /* ensure target dentry is invalidated, despite 1305 rehashing bug in vfs_rename_dir */ 1306 ceph_invalidate_dentry_lease(dn); 1307 1308 dout("dn %p gets new offset %lld\n", req->r_old_dentry, 1309 ceph_dentry(req->r_old_dentry)->offset); 1310 1311 dn = req->r_old_dentry; /* use old_dentry */ 1312 } 1313 1314 /* null dentry? */ 1315 if (!rinfo->head->is_target) { 1316 dout("fill_trace null dentry\n"); 1317 if (d_really_is_positive(dn)) { 1318 ceph_dir_clear_ordered(dir); 1319 dout("d_delete %p\n", dn); 1320 d_delete(dn); 1321 } else { 1322 if (have_lease && d_unhashed(dn)) 1323 d_add(dn, NULL); 1324 update_dentry_lease(dn, rinfo->dlease, 1325 session, 1326 req->r_request_started); 1327 } 1328 goto done; 1329 } 1330 1331 /* attach proper inode */ 1332 if (d_really_is_negative(dn)) { 1333 ceph_dir_clear_ordered(dir); 1334 ihold(in); 1335 dn = splice_dentry(dn, in); 1336 if (IS_ERR(dn)) { 1337 err = PTR_ERR(dn); 1338 goto done; 1339 } 1340 req->r_dentry = dn; /* may have spliced */ 1341 } else if (d_really_is_positive(dn) && d_inode(dn) != in) { 1342 dout(" %p links to %p %llx.%llx, not %llx.%llx\n", 1343 dn, d_inode(dn), ceph_vinop(d_inode(dn)), 1344 ceph_vinop(in)); 1345 d_invalidate(dn); 1346 have_lease = false; 1347 } 1348 1349 if (have_lease) 1350 update_dentry_lease(dn, rinfo->dlease, session, 1351 req->r_request_started); 1352 dout(" final dn %p\n", dn); 1353 } else if (!req->r_aborted && 1354 (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || 1355 req->r_op == CEPH_MDS_OP_MKSNAP)) { 1356 struct dentry *dn = req->r_dentry; 1357 struct inode *dir = req->r_locked_dir; 1358 1359 /* fill out a snapdir LOOKUPSNAP dentry */ 1360 BUG_ON(!dn); 1361 BUG_ON(!dir); 1362 BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR); 1363 dout(" linking snapped dir %p to dn %p\n", in, dn); 1364 ceph_dir_clear_ordered(dir); 1365 ihold(in); 1366 dn = splice_dentry(dn, in); 1367 if (IS_ERR(dn)) { 1368 err = PTR_ERR(dn); 1369 goto done; 1370 } 1371 req->r_dentry = dn; /* may have spliced */ 1372 } 1373 done: 1374 dout("fill_trace done err=%d\n", err); 1375 return err; 1376 } 1377 1378 /* 1379 * Prepopulate our cache with readdir results, leases, etc. 1380 */ 1381 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req, 1382 struct ceph_mds_session *session) 1383 { 1384 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1385 int i, err = 0; 1386 1387 for (i = 0; i < rinfo->dir_nr; i++) { 1388 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i; 1389 struct ceph_vino vino; 1390 struct inode *in; 1391 int rc; 1392 1393 vino.ino = le64_to_cpu(rde->inode.in->ino); 1394 vino.snap = le64_to_cpu(rde->inode.in->snapid); 1395 1396 in = ceph_get_inode(req->r_dentry->d_sb, vino); 1397 if (IS_ERR(in)) { 1398 err = PTR_ERR(in); 1399 dout("new_inode badness got %d\n", err); 1400 continue; 1401 } 1402 rc = fill_inode(in, NULL, &rde->inode, NULL, session, 1403 req->r_request_started, -1, 1404 &req->r_caps_reservation); 1405 if (rc < 0) { 1406 pr_err("fill_inode badness on %p got %d\n", in, rc); 1407 err = rc; 1408 } 1409 iput(in); 1410 } 1411 1412 return err; 1413 } 1414 1415 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl) 1416 { 1417 if (ctl->page) { 1418 kunmap(ctl->page); 1419 put_page(ctl->page); 1420 ctl->page = NULL; 1421 } 1422 } 1423 1424 static int fill_readdir_cache(struct inode *dir, struct dentry *dn, 1425 struct ceph_readdir_cache_control *ctl, 1426 struct ceph_mds_request *req) 1427 { 1428 struct ceph_inode_info *ci = ceph_inode(dir); 1429 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*); 1430 unsigned idx = ctl->index % nsize; 1431 pgoff_t pgoff = ctl->index / nsize; 1432 1433 if (!ctl->page || pgoff != page_index(ctl->page)) { 1434 ceph_readdir_cache_release(ctl); 1435 if (idx == 0) 1436 ctl->page = grab_cache_page(&dir->i_data, pgoff); 1437 else 1438 ctl->page = find_lock_page(&dir->i_data, pgoff); 1439 if (!ctl->page) { 1440 ctl->index = -1; 1441 return idx == 0 ? -ENOMEM : 0; 1442 } 1443 /* reading/filling the cache are serialized by 1444 * i_mutex, no need to use page lock */ 1445 unlock_page(ctl->page); 1446 ctl->dentries = kmap(ctl->page); 1447 if (idx == 0) 1448 memset(ctl->dentries, 0, PAGE_SIZE); 1449 } 1450 1451 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && 1452 req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) { 1453 dout("readdir cache dn %p idx %d\n", dn, ctl->index); 1454 ctl->dentries[idx] = dn; 1455 ctl->index++; 1456 } else { 1457 dout("disable readdir cache\n"); 1458 ctl->index = -1; 1459 } 1460 return 0; 1461 } 1462 1463 int ceph_readdir_prepopulate(struct ceph_mds_request *req, 1464 struct ceph_mds_session *session) 1465 { 1466 struct dentry *parent = req->r_dentry; 1467 struct ceph_inode_info *ci = ceph_inode(d_inode(parent)); 1468 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1469 struct qstr dname; 1470 struct dentry *dn; 1471 struct inode *in; 1472 int err = 0, skipped = 0, ret, i; 1473 struct inode *snapdir = NULL; 1474 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; 1475 u32 frag = le32_to_cpu(rhead->args.readdir.frag); 1476 u32 last_hash = 0; 1477 u32 fpos_offset; 1478 struct ceph_readdir_cache_control cache_ctl = {}; 1479 1480 if (req->r_aborted) 1481 return readdir_prepopulate_inodes_only(req, session); 1482 1483 if (rinfo->hash_order && req->r_path2) { 1484 last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, 1485 req->r_path2, strlen(req->r_path2)); 1486 last_hash = ceph_frag_value(last_hash); 1487 } 1488 1489 if (rinfo->dir_dir && 1490 le32_to_cpu(rinfo->dir_dir->frag) != frag) { 1491 dout("readdir_prepopulate got new frag %x -> %x\n", 1492 frag, le32_to_cpu(rinfo->dir_dir->frag)); 1493 frag = le32_to_cpu(rinfo->dir_dir->frag); 1494 if (!rinfo->hash_order) 1495 req->r_readdir_offset = 2; 1496 } 1497 1498 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { 1499 snapdir = ceph_get_snapdir(d_inode(parent)); 1500 parent = d_find_alias(snapdir); 1501 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n", 1502 rinfo->dir_nr, parent); 1503 } else { 1504 dout("readdir_prepopulate %d items under dn %p\n", 1505 rinfo->dir_nr, parent); 1506 if (rinfo->dir_dir) 1507 ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir); 1508 } 1509 1510 if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 && 1511 !(rinfo->hash_order && req->r_path2)) { 1512 /* note dir version at start of readdir so we can tell 1513 * if any dentries get dropped */ 1514 req->r_dir_release_cnt = atomic64_read(&ci->i_release_count); 1515 req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count); 1516 req->r_readdir_cache_idx = 0; 1517 } 1518 1519 cache_ctl.index = req->r_readdir_cache_idx; 1520 fpos_offset = req->r_readdir_offset; 1521 1522 /* FIXME: release caps/leases if error occurs */ 1523 for (i = 0; i < rinfo->dir_nr; i++) { 1524 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i; 1525 struct ceph_vino vino; 1526 1527 dname.name = rde->name; 1528 dname.len = rde->name_len; 1529 dname.hash = full_name_hash(parent, dname.name, dname.len); 1530 1531 vino.ino = le64_to_cpu(rde->inode.in->ino); 1532 vino.snap = le64_to_cpu(rde->inode.in->snapid); 1533 1534 if (rinfo->hash_order) { 1535 u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, 1536 rde->name, rde->name_len); 1537 hash = ceph_frag_value(hash); 1538 if (hash != last_hash) 1539 fpos_offset = 2; 1540 last_hash = hash; 1541 rde->offset = ceph_make_fpos(hash, fpos_offset++, true); 1542 } else { 1543 rde->offset = ceph_make_fpos(frag, fpos_offset++, false); 1544 } 1545 1546 retry_lookup: 1547 dn = d_lookup(parent, &dname); 1548 dout("d_lookup on parent=%p name=%.*s got %p\n", 1549 parent, dname.len, dname.name, dn); 1550 1551 if (!dn) { 1552 dn = d_alloc(parent, &dname); 1553 dout("d_alloc %p '%.*s' = %p\n", parent, 1554 dname.len, dname.name, dn); 1555 if (dn == NULL) { 1556 dout("d_alloc badness\n"); 1557 err = -ENOMEM; 1558 goto out; 1559 } 1560 } else if (d_really_is_positive(dn) && 1561 (ceph_ino(d_inode(dn)) != vino.ino || 1562 ceph_snap(d_inode(dn)) != vino.snap)) { 1563 dout(" dn %p points to wrong inode %p\n", 1564 dn, d_inode(dn)); 1565 d_delete(dn); 1566 dput(dn); 1567 goto retry_lookup; 1568 } 1569 1570 /* inode */ 1571 if (d_really_is_positive(dn)) { 1572 in = d_inode(dn); 1573 } else { 1574 in = ceph_get_inode(parent->d_sb, vino); 1575 if (IS_ERR(in)) { 1576 dout("new_inode badness\n"); 1577 d_drop(dn); 1578 dput(dn); 1579 err = PTR_ERR(in); 1580 goto out; 1581 } 1582 } 1583 1584 ret = fill_inode(in, NULL, &rde->inode, NULL, session, 1585 req->r_request_started, -1, 1586 &req->r_caps_reservation); 1587 if (ret < 0) { 1588 pr_err("fill_inode badness on %p\n", in); 1589 if (d_really_is_negative(dn)) 1590 iput(in); 1591 d_drop(dn); 1592 err = ret; 1593 goto next_item; 1594 } 1595 1596 if (d_really_is_negative(dn)) { 1597 struct dentry *realdn; 1598 1599 if (ceph_security_xattr_deadlock(in)) { 1600 dout(" skip splicing dn %p to inode %p" 1601 " (security xattr deadlock)\n", dn, in); 1602 iput(in); 1603 skipped++; 1604 goto next_item; 1605 } 1606 1607 realdn = splice_dentry(dn, in); 1608 if (IS_ERR(realdn)) { 1609 err = PTR_ERR(realdn); 1610 d_drop(dn); 1611 dn = NULL; 1612 goto next_item; 1613 } 1614 dn = realdn; 1615 } 1616 1617 ceph_dentry(dn)->offset = rde->offset; 1618 1619 update_dentry_lease(dn, rde->lease, req->r_session, 1620 req->r_request_started); 1621 1622 if (err == 0 && skipped == 0 && cache_ctl.index >= 0) { 1623 ret = fill_readdir_cache(d_inode(parent), dn, 1624 &cache_ctl, req); 1625 if (ret < 0) 1626 err = ret; 1627 } 1628 next_item: 1629 if (dn) 1630 dput(dn); 1631 } 1632 out: 1633 if (err == 0 && skipped == 0) { 1634 req->r_did_prepopulate = true; 1635 req->r_readdir_cache_idx = cache_ctl.index; 1636 } 1637 ceph_readdir_cache_release(&cache_ctl); 1638 if (snapdir) { 1639 iput(snapdir); 1640 dput(parent); 1641 } 1642 dout("readdir_prepopulate done\n"); 1643 return err; 1644 } 1645 1646 int ceph_inode_set_size(struct inode *inode, loff_t size) 1647 { 1648 struct ceph_inode_info *ci = ceph_inode(inode); 1649 int ret = 0; 1650 1651 spin_lock(&ci->i_ceph_lock); 1652 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size); 1653 i_size_write(inode, size); 1654 inode->i_blocks = calc_inode_blocks(size); 1655 1656 /* tell the MDS if we are approaching max_size */ 1657 if ((size << 1) >= ci->i_max_size && 1658 (ci->i_reported_size << 1) < ci->i_max_size) 1659 ret = 1; 1660 1661 spin_unlock(&ci->i_ceph_lock); 1662 return ret; 1663 } 1664 1665 /* 1666 * Write back inode data in a worker thread. (This can't be done 1667 * in the message handler context.) 1668 */ 1669 void ceph_queue_writeback(struct inode *inode) 1670 { 1671 ihold(inode); 1672 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1673 &ceph_inode(inode)->i_wb_work)) { 1674 dout("ceph_queue_writeback %p\n", inode); 1675 } else { 1676 dout("ceph_queue_writeback %p failed\n", inode); 1677 iput(inode); 1678 } 1679 } 1680 1681 static void ceph_writeback_work(struct work_struct *work) 1682 { 1683 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1684 i_wb_work); 1685 struct inode *inode = &ci->vfs_inode; 1686 1687 dout("writeback %p\n", inode); 1688 filemap_fdatawrite(&inode->i_data); 1689 iput(inode); 1690 } 1691 1692 /* 1693 * queue an async invalidation 1694 */ 1695 void ceph_queue_invalidate(struct inode *inode) 1696 { 1697 ihold(inode); 1698 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1699 &ceph_inode(inode)->i_pg_inv_work)) { 1700 dout("ceph_queue_invalidate %p\n", inode); 1701 } else { 1702 dout("ceph_queue_invalidate %p failed\n", inode); 1703 iput(inode); 1704 } 1705 } 1706 1707 /* 1708 * Invalidate inode pages in a worker thread. (This can't be done 1709 * in the message handler context.) 1710 */ 1711 static void ceph_invalidate_work(struct work_struct *work) 1712 { 1713 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1714 i_pg_inv_work); 1715 struct inode *inode = &ci->vfs_inode; 1716 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1717 u32 orig_gen; 1718 int check = 0; 1719 1720 mutex_lock(&ci->i_truncate_mutex); 1721 1722 if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 1723 pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n", 1724 inode, ceph_ino(inode)); 1725 mapping_set_error(inode->i_mapping, -EIO); 1726 truncate_pagecache(inode, 0); 1727 mutex_unlock(&ci->i_truncate_mutex); 1728 goto out; 1729 } 1730 1731 spin_lock(&ci->i_ceph_lock); 1732 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1733 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1734 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 1735 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 1736 check = 1; 1737 spin_unlock(&ci->i_ceph_lock); 1738 mutex_unlock(&ci->i_truncate_mutex); 1739 goto out; 1740 } 1741 orig_gen = ci->i_rdcache_gen; 1742 spin_unlock(&ci->i_ceph_lock); 1743 1744 if (invalidate_inode_pages2(inode->i_mapping) < 0) { 1745 pr_err("invalidate_pages %p fails\n", inode); 1746 } 1747 1748 spin_lock(&ci->i_ceph_lock); 1749 if (orig_gen == ci->i_rdcache_gen && 1750 orig_gen == ci->i_rdcache_revoking) { 1751 dout("invalidate_pages %p gen %d successful\n", inode, 1752 ci->i_rdcache_gen); 1753 ci->i_rdcache_revoking--; 1754 check = 1; 1755 } else { 1756 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n", 1757 inode, orig_gen, ci->i_rdcache_gen, 1758 ci->i_rdcache_revoking); 1759 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 1760 check = 1; 1761 } 1762 spin_unlock(&ci->i_ceph_lock); 1763 mutex_unlock(&ci->i_truncate_mutex); 1764 out: 1765 if (check) 1766 ceph_check_caps(ci, 0, NULL); 1767 iput(inode); 1768 } 1769 1770 1771 /* 1772 * called by trunc_wq; 1773 * 1774 * We also truncate in a separate thread as well. 1775 */ 1776 static void ceph_vmtruncate_work(struct work_struct *work) 1777 { 1778 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1779 i_vmtruncate_work); 1780 struct inode *inode = &ci->vfs_inode; 1781 1782 dout("vmtruncate_work %p\n", inode); 1783 __ceph_do_pending_vmtruncate(inode); 1784 iput(inode); 1785 } 1786 1787 /* 1788 * Queue an async vmtruncate. If we fail to queue work, we will handle 1789 * the truncation the next time we call __ceph_do_pending_vmtruncate. 1790 */ 1791 void ceph_queue_vmtruncate(struct inode *inode) 1792 { 1793 struct ceph_inode_info *ci = ceph_inode(inode); 1794 1795 ihold(inode); 1796 1797 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, 1798 &ci->i_vmtruncate_work)) { 1799 dout("ceph_queue_vmtruncate %p\n", inode); 1800 } else { 1801 dout("ceph_queue_vmtruncate %p failed, pending=%d\n", 1802 inode, ci->i_truncate_pending); 1803 iput(inode); 1804 } 1805 } 1806 1807 /* 1808 * Make sure any pending truncation is applied before doing anything 1809 * that may depend on it. 1810 */ 1811 void __ceph_do_pending_vmtruncate(struct inode *inode) 1812 { 1813 struct ceph_inode_info *ci = ceph_inode(inode); 1814 u64 to; 1815 int wrbuffer_refs, finish = 0; 1816 1817 mutex_lock(&ci->i_truncate_mutex); 1818 retry: 1819 spin_lock(&ci->i_ceph_lock); 1820 if (ci->i_truncate_pending == 0) { 1821 dout("__do_pending_vmtruncate %p none pending\n", inode); 1822 spin_unlock(&ci->i_ceph_lock); 1823 mutex_unlock(&ci->i_truncate_mutex); 1824 return; 1825 } 1826 1827 /* 1828 * make sure any dirty snapped pages are flushed before we 1829 * possibly truncate them.. so write AND block! 1830 */ 1831 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { 1832 dout("__do_pending_vmtruncate %p flushing snaps first\n", 1833 inode); 1834 spin_unlock(&ci->i_ceph_lock); 1835 filemap_write_and_wait_range(&inode->i_data, 0, 1836 inode->i_sb->s_maxbytes); 1837 goto retry; 1838 } 1839 1840 /* there should be no reader or writer */ 1841 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref); 1842 1843 to = ci->i_truncate_size; 1844 wrbuffer_refs = ci->i_wrbuffer_ref; 1845 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode, 1846 ci->i_truncate_pending, to); 1847 spin_unlock(&ci->i_ceph_lock); 1848 1849 truncate_pagecache(inode, to); 1850 1851 spin_lock(&ci->i_ceph_lock); 1852 if (to == ci->i_truncate_size) { 1853 ci->i_truncate_pending = 0; 1854 finish = 1; 1855 } 1856 spin_unlock(&ci->i_ceph_lock); 1857 if (!finish) 1858 goto retry; 1859 1860 mutex_unlock(&ci->i_truncate_mutex); 1861 1862 if (wrbuffer_refs == 0) 1863 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 1864 1865 wake_up_all(&ci->i_cap_wq); 1866 } 1867 1868 /* 1869 * symlinks 1870 */ 1871 static const struct inode_operations ceph_symlink_iops = { 1872 .get_link = simple_get_link, 1873 .setattr = ceph_setattr, 1874 .getattr = ceph_getattr, 1875 .listxattr = ceph_listxattr, 1876 }; 1877 1878 int __ceph_setattr(struct inode *inode, struct iattr *attr) 1879 { 1880 struct ceph_inode_info *ci = ceph_inode(inode); 1881 const unsigned int ia_valid = attr->ia_valid; 1882 struct ceph_mds_request *req; 1883 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 1884 struct ceph_cap_flush *prealloc_cf; 1885 int issued; 1886 int release = 0, dirtied = 0; 1887 int mask = 0; 1888 int err = 0; 1889 int inode_dirty_flags = 0; 1890 bool lock_snap_rwsem = false; 1891 1892 prealloc_cf = ceph_alloc_cap_flush(); 1893 if (!prealloc_cf) 1894 return -ENOMEM; 1895 1896 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR, 1897 USE_AUTH_MDS); 1898 if (IS_ERR(req)) { 1899 ceph_free_cap_flush(prealloc_cf); 1900 return PTR_ERR(req); 1901 } 1902 1903 spin_lock(&ci->i_ceph_lock); 1904 issued = __ceph_caps_issued(ci, NULL); 1905 1906 if (!ci->i_head_snapc && 1907 (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) { 1908 lock_snap_rwsem = true; 1909 if (!down_read_trylock(&mdsc->snap_rwsem)) { 1910 spin_unlock(&ci->i_ceph_lock); 1911 down_read(&mdsc->snap_rwsem); 1912 spin_lock(&ci->i_ceph_lock); 1913 issued = __ceph_caps_issued(ci, NULL); 1914 } 1915 } 1916 1917 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); 1918 1919 if (ia_valid & ATTR_UID) { 1920 dout("setattr %p uid %d -> %d\n", inode, 1921 from_kuid(&init_user_ns, inode->i_uid), 1922 from_kuid(&init_user_ns, attr->ia_uid)); 1923 if (issued & CEPH_CAP_AUTH_EXCL) { 1924 inode->i_uid = attr->ia_uid; 1925 dirtied |= CEPH_CAP_AUTH_EXCL; 1926 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1927 !uid_eq(attr->ia_uid, inode->i_uid)) { 1928 req->r_args.setattr.uid = cpu_to_le32( 1929 from_kuid(&init_user_ns, attr->ia_uid)); 1930 mask |= CEPH_SETATTR_UID; 1931 release |= CEPH_CAP_AUTH_SHARED; 1932 } 1933 } 1934 if (ia_valid & ATTR_GID) { 1935 dout("setattr %p gid %d -> %d\n", inode, 1936 from_kgid(&init_user_ns, inode->i_gid), 1937 from_kgid(&init_user_ns, attr->ia_gid)); 1938 if (issued & CEPH_CAP_AUTH_EXCL) { 1939 inode->i_gid = attr->ia_gid; 1940 dirtied |= CEPH_CAP_AUTH_EXCL; 1941 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1942 !gid_eq(attr->ia_gid, inode->i_gid)) { 1943 req->r_args.setattr.gid = cpu_to_le32( 1944 from_kgid(&init_user_ns, attr->ia_gid)); 1945 mask |= CEPH_SETATTR_GID; 1946 release |= CEPH_CAP_AUTH_SHARED; 1947 } 1948 } 1949 if (ia_valid & ATTR_MODE) { 1950 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode, 1951 attr->ia_mode); 1952 if (issued & CEPH_CAP_AUTH_EXCL) { 1953 inode->i_mode = attr->ia_mode; 1954 dirtied |= CEPH_CAP_AUTH_EXCL; 1955 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1956 attr->ia_mode != inode->i_mode) { 1957 inode->i_mode = attr->ia_mode; 1958 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode); 1959 mask |= CEPH_SETATTR_MODE; 1960 release |= CEPH_CAP_AUTH_SHARED; 1961 } 1962 } 1963 1964 if (ia_valid & ATTR_ATIME) { 1965 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode, 1966 inode->i_atime.tv_sec, inode->i_atime.tv_nsec, 1967 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec); 1968 if (issued & CEPH_CAP_FILE_EXCL) { 1969 ci->i_time_warp_seq++; 1970 inode->i_atime = attr->ia_atime; 1971 dirtied |= CEPH_CAP_FILE_EXCL; 1972 } else if ((issued & CEPH_CAP_FILE_WR) && 1973 timespec_compare(&inode->i_atime, 1974 &attr->ia_atime) < 0) { 1975 inode->i_atime = attr->ia_atime; 1976 dirtied |= CEPH_CAP_FILE_WR; 1977 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1978 !timespec_equal(&inode->i_atime, &attr->ia_atime)) { 1979 ceph_encode_timespec(&req->r_args.setattr.atime, 1980 &attr->ia_atime); 1981 mask |= CEPH_SETATTR_ATIME; 1982 release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | 1983 CEPH_CAP_FILE_WR; 1984 } 1985 } 1986 if (ia_valid & ATTR_MTIME) { 1987 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode, 1988 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 1989 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec); 1990 if (issued & CEPH_CAP_FILE_EXCL) { 1991 ci->i_time_warp_seq++; 1992 inode->i_mtime = attr->ia_mtime; 1993 dirtied |= CEPH_CAP_FILE_EXCL; 1994 } else if ((issued & CEPH_CAP_FILE_WR) && 1995 timespec_compare(&inode->i_mtime, 1996 &attr->ia_mtime) < 0) { 1997 inode->i_mtime = attr->ia_mtime; 1998 dirtied |= CEPH_CAP_FILE_WR; 1999 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 2000 !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) { 2001 ceph_encode_timespec(&req->r_args.setattr.mtime, 2002 &attr->ia_mtime); 2003 mask |= CEPH_SETATTR_MTIME; 2004 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 2005 CEPH_CAP_FILE_WR; 2006 } 2007 } 2008 if (ia_valid & ATTR_SIZE) { 2009 dout("setattr %p size %lld -> %lld\n", inode, 2010 inode->i_size, attr->ia_size); 2011 if ((issued & CEPH_CAP_FILE_EXCL) && 2012 attr->ia_size > inode->i_size) { 2013 i_size_write(inode, attr->ia_size); 2014 inode->i_blocks = calc_inode_blocks(attr->ia_size); 2015 inode->i_ctime = attr->ia_ctime; 2016 ci->i_reported_size = attr->ia_size; 2017 dirtied |= CEPH_CAP_FILE_EXCL; 2018 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 2019 attr->ia_size != inode->i_size) { 2020 req->r_args.setattr.size = cpu_to_le64(attr->ia_size); 2021 req->r_args.setattr.old_size = 2022 cpu_to_le64(inode->i_size); 2023 mask |= CEPH_SETATTR_SIZE; 2024 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 2025 CEPH_CAP_FILE_WR; 2026 } 2027 } 2028 2029 /* these do nothing */ 2030 if (ia_valid & ATTR_CTIME) { 2031 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME| 2032 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0; 2033 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode, 2034 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 2035 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 2036 only ? "ctime only" : "ignored"); 2037 inode->i_ctime = attr->ia_ctime; 2038 if (only) { 2039 /* 2040 * if kernel wants to dirty ctime but nothing else, 2041 * we need to choose a cap to dirty under, or do 2042 * a almost-no-op setattr 2043 */ 2044 if (issued & CEPH_CAP_AUTH_EXCL) 2045 dirtied |= CEPH_CAP_AUTH_EXCL; 2046 else if (issued & CEPH_CAP_FILE_EXCL) 2047 dirtied |= CEPH_CAP_FILE_EXCL; 2048 else if (issued & CEPH_CAP_XATTR_EXCL) 2049 dirtied |= CEPH_CAP_XATTR_EXCL; 2050 else 2051 mask |= CEPH_SETATTR_CTIME; 2052 } 2053 } 2054 if (ia_valid & ATTR_FILE) 2055 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 2056 2057 if (dirtied) { 2058 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, 2059 &prealloc_cf); 2060 inode->i_ctime = current_time(inode); 2061 } 2062 2063 release &= issued; 2064 spin_unlock(&ci->i_ceph_lock); 2065 if (lock_snap_rwsem) 2066 up_read(&mdsc->snap_rwsem); 2067 2068 if (inode_dirty_flags) 2069 __mark_inode_dirty(inode, inode_dirty_flags); 2070 2071 if (ia_valid & ATTR_MODE) { 2072 err = posix_acl_chmod(inode, attr->ia_mode); 2073 if (err) 2074 goto out_put; 2075 } 2076 2077 if (mask) { 2078 req->r_inode = inode; 2079 ihold(inode); 2080 req->r_inode_drop = release; 2081 req->r_args.setattr.mask = cpu_to_le32(mask); 2082 req->r_num_caps = 1; 2083 err = ceph_mdsc_do_request(mdsc, NULL, req); 2084 } 2085 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 2086 ceph_cap_string(dirtied), mask); 2087 2088 ceph_mdsc_put_request(req); 2089 if (mask & CEPH_SETATTR_SIZE) 2090 __ceph_do_pending_vmtruncate(inode); 2091 ceph_free_cap_flush(prealloc_cf); 2092 return err; 2093 out_put: 2094 ceph_mdsc_put_request(req); 2095 ceph_free_cap_flush(prealloc_cf); 2096 return err; 2097 } 2098 2099 /* 2100 * setattr 2101 */ 2102 int ceph_setattr(struct dentry *dentry, struct iattr *attr) 2103 { 2104 struct inode *inode = d_inode(dentry); 2105 int err; 2106 2107 if (ceph_snap(inode) != CEPH_NOSNAP) 2108 return -EROFS; 2109 2110 err = setattr_prepare(dentry, attr); 2111 if (err != 0) 2112 return err; 2113 2114 return __ceph_setattr(inode, attr); 2115 } 2116 2117 /* 2118 * Verify that we have a lease on the given mask. If not, 2119 * do a getattr against an mds. 2120 */ 2121 int __ceph_do_getattr(struct inode *inode, struct page *locked_page, 2122 int mask, bool force) 2123 { 2124 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 2125 struct ceph_mds_client *mdsc = fsc->mdsc; 2126 struct ceph_mds_request *req; 2127 int err; 2128 2129 if (ceph_snap(inode) == CEPH_SNAPDIR) { 2130 dout("do_getattr inode %p SNAPDIR\n", inode); 2131 return 0; 2132 } 2133 2134 dout("do_getattr inode %p mask %s mode 0%o\n", 2135 inode, ceph_cap_string(mask), inode->i_mode); 2136 if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 2137 return 0; 2138 2139 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 2140 if (IS_ERR(req)) 2141 return PTR_ERR(req); 2142 req->r_inode = inode; 2143 ihold(inode); 2144 req->r_num_caps = 1; 2145 req->r_args.getattr.mask = cpu_to_le32(mask); 2146 req->r_locked_page = locked_page; 2147 err = ceph_mdsc_do_request(mdsc, NULL, req); 2148 if (locked_page && err == 0) { 2149 u64 inline_version = req->r_reply_info.targeti.inline_version; 2150 if (inline_version == 0) { 2151 /* the reply is supposed to contain inline data */ 2152 err = -EINVAL; 2153 } else if (inline_version == CEPH_INLINE_NONE) { 2154 err = -ENODATA; 2155 } else { 2156 err = req->r_reply_info.targeti.inline_len; 2157 } 2158 } 2159 ceph_mdsc_put_request(req); 2160 dout("do_getattr result=%d\n", err); 2161 return err; 2162 } 2163 2164 2165 /* 2166 * Check inode permissions. We verify we have a valid value for 2167 * the AUTH cap, then call the generic handler. 2168 */ 2169 int ceph_permission(struct inode *inode, int mask) 2170 { 2171 int err; 2172 2173 if (mask & MAY_NOT_BLOCK) 2174 return -ECHILD; 2175 2176 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false); 2177 2178 if (!err) 2179 err = generic_permission(inode, mask); 2180 return err; 2181 } 2182 2183 /* 2184 * Get all attributes. Hopefully somedata we'll have a statlite() 2185 * and can limit the fields we require to be accurate. 2186 */ 2187 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 2188 struct kstat *stat) 2189 { 2190 struct inode *inode = d_inode(dentry); 2191 struct ceph_inode_info *ci = ceph_inode(inode); 2192 int err; 2193 2194 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false); 2195 if (!err) { 2196 generic_fillattr(inode, stat); 2197 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino); 2198 if (ceph_snap(inode) != CEPH_NOSNAP) 2199 stat->dev = ceph_snap(inode); 2200 else 2201 stat->dev = 0; 2202 if (S_ISDIR(inode->i_mode)) { 2203 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), 2204 RBYTES)) 2205 stat->size = ci->i_rbytes; 2206 else 2207 stat->size = ci->i_files + ci->i_subdirs; 2208 stat->blocks = 0; 2209 stat->blksize = 65536; 2210 } 2211 } 2212 return err; 2213 } 2214