#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_snapdir_iops;
	inode->i_fop = &ceph_snapdir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};
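
/*
 * Note on the lookup above (a sketch, not a contract): the inode
 * cache is keyed by the hashed (ino, snap) pair.  iget5_locked()
 * either returns an existing inode or a fresh one with I_NEW set, in
 * which case ceph_set_ino_cb() stamps the vino before the inode is
 * published via unlock_new_inode().  A typical caller does:
 *
 *	struct inode *in = ceph_get_inode(sb, vino);
 *	if (IS_ERR(in))
 *		return PTR_ERR(in);
 *	...
 *	iput(in);
 */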

/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
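
/*
 * Frag encoding, by way of example (assuming the standard ceph
 * layout, where the upper 8 bits of a frag hold the number of
 * significant bits and the lower 24 bits hold the value prefix):
 * ceph_frag_make(0, 0) covers the entire hash range; splitting it by
 * 1 yields two children, each covering half the range, and
 * ceph_frag_contains_value(frag, v) tests whether hash value v falls
 * inside a given fragment.
 */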

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}
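
/*
 * Usage sketch: to direct a request for a given name in this
 * directory, hash the name and pick the leaf that contains it, e.g.:
 *
 *	u32 h = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, name, len);
 *	u32 fg = ceph_choose_frag(ci, h, &frag, &found);
 *
 * If @found is set, @frag carries delegation info and the request
 * can be steered at frag.mds directly.
 */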

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	if (mds == -1) /* CDIR_AUTH_PARENT */
		mds = diri_auth;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
	return ceph_frag_compare(le32_to_cpu(ls->frag),
				 le32_to_cpu(rs->frag));
}

static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
	if (!frag)
		return f == ceph_frag_make(0, 0);
	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
		return false;
	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}
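
/*
 * Rebuild the in-memory frag tree from the split list the MDS sent.
 * A sketch of the flow below: first decide cheaply whether an update
 * is needed at all (compare nsplits, spot-check one randomly chosen
 * split, and cross-check the dirfrag if one was supplied); if so,
 * sort the splits and walk them in frag order alongside the rbtree,
 * deleting stale split/leaf nodes and inserting or updating the rest
 * in a single pass.
 */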
static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag, *prev_frag = NULL;
	struct rb_node *rb_node;
	unsigned i, split_by, nsplits;
	u32 id;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits != ci->i_fragtree_nsplits) {
		update = true;
	} else if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		split_by = le32_to_cpu(fragtree->splits[i].by);
		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
			       "frag %x split by %d\n", ceph_vinop(inode),
			       i, nsplits, id, split_by);
			continue;
		}
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			/* delete stale split/leaf node */
			if (frag->split_by > 0 ||
			    !is_frag_child(frag->frag, prev_frag)) {
				rb_erase(&frag->node, &ci->i_fragtree);
				if (frag->split_by > 0)
					ci->i_fragtree_nsplits--;
				kfree(frag);
			}
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		if (frag->split_by == 0)
			ci->i_fragtree_nsplits++;
		frag->split_by = split_by;
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
		prev_frag = frag;
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		/* delete stale split/leaf node */
		if (frag->split_by > 0 ||
		    !is_frag_child(frag->frag, prev_frag)) {
			rb_erase(&frag->node, &ci->i_fragtree);
			if (frag->split_by > 0)
				ci->i_fragtree_nsplits--;
			kfree(frag);
		}
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	INIT_LIST_HEAD(&ci->i_cap_flush_list);
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}
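
/*
 * Tear down per-inode state when the VFS discards an inode.  The
 * struct itself is freed through call_rcu() (ceph_i_callback above);
 * this is the usual pattern for letting RCU-walk path lookups touch
 * the inode without risk of it being recycled under them.
 */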
void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}
	ci->i_fragtree_nsplits = 0;

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * A positive dentry and its corresponding inode always arrive
	 * together in an MDS reply, so there is no need to keep the
	 * inode in the cache after all its aliases are dropped.
	 */
	return 1;
}

void ceph_evict_inode(struct inode *inode)
{
	/* wait unsafe sync writes */
	ceph_sync_write_wait(inode);
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
}
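
/*
 * i_blocks is accounted in 512-byte units; this rounds up.  For
 * example: size 1 -> 1 block, size 512 -> 1 block, size 513 -> 2.
 */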
static inline blkcnt_t calc_inode_blocks(u64 size)
{
	return (size + (1<<9) - 1) >> 9;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up-to-date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = calc_inode_blocks(size);
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}
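
/*
 * Update ctime/mtime/atime from an MDS message.  A sketch of the
 * rules implemented below: while we hold exclusive/write caps our
 * local times may be newer than the MDS's, so we only move them
 * forward, except that a time_warp_seq bump means someone did a
 * utimes() and the MDS values win outright.  With no such caps held,
 * the MDS is authoritative and its values are taken as-is.
 */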
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
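
/*
 * Cap shorthand used below (a rough guide, not a full treatment):
 * a SHARED cap lets us cache a field, an EXCL cap lets us mutate it
 * locally, so a field is only overwritten from an MDS reply when we
 * do not hold the corresponding EXCL cap.  AUTH covers mode/uid/gid,
 * LINK covers nlink, FILE covers size and the file times.
 */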

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode, struct page *locked_page,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued = 0, implemented, new_issued;
	struct timespec mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_string *pool_ns = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* prealloc new cap struct */
	if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP)
		new_cap = ceph_get_cap(mdsc, caps_reservation);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	if (iinfo->pool_ns_len > 0)
		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
						     iinfo->pool_ns_len);

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);
	new_issued = ~issued & le32_to_cpu(info->cap.caps);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec(&atime, &info->atime);
		ceph_decode_timespec(&mtime, &info->mtime);
		ceph_decode_timespec(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(info->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		s64 old_pool = ci->i_layout.pool_id;
		struct ceph_string *old_ns;

		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
					lockdep_is_held(&ci->i_ceph_lock));
		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);

		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

		pool_ns = old_ns;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			if (symlen != i_size_read(inode)) {
				pr_err("fill_inode %llx.%llx BAD symlink "
				       "size %lld\n", ceph_vinop(inode),
				       i_size_read(inode));
				i_size_write(inode, symlen);
				inode->i_blocks = calc_inode_blocks(symlen);
			}

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			unsigned caps = le32_to_cpu(info->cap.caps);
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode, caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
		}
	} else if (cap_fmode >= 0) {
		pr_warn("mds issued no caps on %llx.%llx\n",
			ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page ||
		     (le32_to_cpu(info->cap.caps) & cache_caps)))
			fill_inline = true;
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	ceph_put_string(pool_ns);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = d_inode(dentry->d_parent);

	/* only track leases on regular dentries */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		goto out_unlock;

	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, di->time))
		goto out_unlock; /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	di->time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
{
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain:
 *	a directory inode along with a dentry,
 *	and/or a target inode.
 *
 * Called with snap_rwsem (read).
 */
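/*
 * Concretely (a sketch of the cases handled below): a plain getattr
 * reply carries only the target inode; a lookup in a directory we
 * hold locked carries the directory, the dentry, and (if the name
 * exists) the target; unlink carries the dentry with no target, and
 * rename additionally moves r_old_dentry into place.
 */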
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = req->r_session;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, NULL,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(parent, dname.name, dname.len);
			vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (dn == NULL) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = 0;
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != vino.ino ||
				    ceph_snap(d_inode(dn)) != vino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL,
				session, req->r_request_started,
				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
				 rinfo->head->result == 0) ? req->r_fmode : -1,
				&req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && req->r_locked_dir &&
	    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				ceph_dir_clear_ordered(dir);
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				if (have_lease && d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			dn = splice_dentry(dn, in);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		struct dentry *dn = req->r_dentry;
		struct inode *dir = req->r_locked_dir;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		dn = splice_dentry(dn, in);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, NULL, &rde->inode, NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
		}
		iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}
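
/*
 * The readdir cache stores dentry pointers in the directory's page
 * cache, nsize per page.  For example, with 4K pages and 8-byte
 * pointers, nsize is 512, so cache slot k lives in page k / 512 at
 * index k % 512; the first entry of a page (idx == 0) triggers
 * allocation of a fresh, zeroed page.
 */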
1347 */ 1348 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req, 1349 struct ceph_mds_session *session) 1350 { 1351 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1352 int i, err = 0; 1353 1354 for (i = 0; i < rinfo->dir_nr; i++) { 1355 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i; 1356 struct ceph_vino vino; 1357 struct inode *in; 1358 int rc; 1359 1360 vino.ino = le64_to_cpu(rde->inode.in->ino); 1361 vino.snap = le64_to_cpu(rde->inode.in->snapid); 1362 1363 in = ceph_get_inode(req->r_dentry->d_sb, vino); 1364 if (IS_ERR(in)) { 1365 err = PTR_ERR(in); 1366 dout("new_inode badness got %d\n", err); 1367 continue; 1368 } 1369 rc = fill_inode(in, NULL, &rde->inode, NULL, session, 1370 req->r_request_started, -1, 1371 &req->r_caps_reservation); 1372 if (rc < 0) { 1373 pr_err("fill_inode badness on %p got %d\n", in, rc); 1374 err = rc; 1375 } 1376 iput(in); 1377 } 1378 1379 return err; 1380 } 1381 1382 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl) 1383 { 1384 if (ctl->page) { 1385 kunmap(ctl->page); 1386 put_page(ctl->page); 1387 ctl->page = NULL; 1388 } 1389 } 1390 1391 static int fill_readdir_cache(struct inode *dir, struct dentry *dn, 1392 struct ceph_readdir_cache_control *ctl, 1393 struct ceph_mds_request *req) 1394 { 1395 struct ceph_inode_info *ci = ceph_inode(dir); 1396 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*); 1397 unsigned idx = ctl->index % nsize; 1398 pgoff_t pgoff = ctl->index / nsize; 1399 1400 if (!ctl->page || pgoff != page_index(ctl->page)) { 1401 ceph_readdir_cache_release(ctl); 1402 if (idx == 0) 1403 ctl->page = grab_cache_page(&dir->i_data, pgoff); 1404 else 1405 ctl->page = find_lock_page(&dir->i_data, pgoff); 1406 if (!ctl->page) { 1407 ctl->index = -1; 1408 return idx == 0 ? 
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);
	u32 last_hash = 0;
	u32 fpos_offset;
	struct ceph_readdir_cache_control cache_ctl = {};

	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->hash_order && req->r_path2) {
		last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
					  req->r_path2, strlen(req->r_path2));
		last_hash = ceph_frag_value(last_hash);
	}

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (!rinfo->hash_order)
			req->r_readdir_offset = 2;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(d_inode(parent));
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
	}

	if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 &&
	    !(rinfo->hash_order && req->r_path2)) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
		req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
		req->r_readdir_cache_idx = 0;
	}

	cache_ctl.index = req->r_readdir_cache_idx;
	fpos_offset = req->r_readdir_offset;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;

		dname.name = rde->name;
		dname.len = rde->name_len;
		dname.hash = full_name_hash(parent, dname.name, dname.len);

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		if (rinfo->hash_order) {
			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						 rde->name, rde->name_len);
			hash = ceph_frag_value(hash);
			if (hash != last_hash)
				fpos_offset = 2;
			last_hash = hash;
			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
		} else {
			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
		}

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != vino.ino ||
			    ceph_snap(d_inode(dn)) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = fill_inode(in, NULL, &rde->inode, NULL, session,
				 req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn))
				iput(in);
			d_drop(dn);
			err = ret;
			goto next_item;
		}

		if (d_really_is_negative(dn)) {
			struct dentry *realdn;

			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				iput(in);
				skipped++;
				goto next_item;
			}

			realdn = splice_dentry(dn, in);
			if (IS_ERR(realdn)) {
				err = PTR_ERR(realdn);
				d_drop(dn);
				dn = NULL;
				goto next_item;
			}
			dn = realdn;
		}

		ceph_dentry(dn)->offset = rde->offset;

		update_dentry_lease(dn, rde->lease, req->r_session,
				    req->r_request_started);

		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		if (dn)
			dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}
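
/*
 * Record a new size we've cached locally (e.g. after a buffered
 * write).  The return value asks the caller to notify the MDS once
 * we cross the halfway mark of max_size: for example, with max_size
 * of 4MB we report when size reaches 2MB, provided the last reported
 * size was still below that mark.
 */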
int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	i_size_write(inode, size);
	inode->i_blocks = calc_inode_blocks(size);

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
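
/*
 * Note the reference pattern shared by the three queue_* helpers
 * below: ihold() pins the inode before queue_work(), the worker
 * drops that reference with iput() when it finishes, and if the work
 * was already queued we drop the reference immediately ourselves.
 */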
1635 */ 1636 void ceph_queue_writeback(struct inode *inode) 1637 { 1638 ihold(inode); 1639 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1640 &ceph_inode(inode)->i_wb_work)) { 1641 dout("ceph_queue_writeback %p\n", inode); 1642 } else { 1643 dout("ceph_queue_writeback %p failed\n", inode); 1644 iput(inode); 1645 } 1646 } 1647 1648 static void ceph_writeback_work(struct work_struct *work) 1649 { 1650 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1651 i_wb_work); 1652 struct inode *inode = &ci->vfs_inode; 1653 1654 dout("writeback %p\n", inode); 1655 filemap_fdatawrite(&inode->i_data); 1656 iput(inode); 1657 } 1658 1659 /* 1660 * queue an async invalidation 1661 */ 1662 void ceph_queue_invalidate(struct inode *inode) 1663 { 1664 ihold(inode); 1665 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1666 &ceph_inode(inode)->i_pg_inv_work)) { 1667 dout("ceph_queue_invalidate %p\n", inode); 1668 } else { 1669 dout("ceph_queue_invalidate %p failed\n", inode); 1670 iput(inode); 1671 } 1672 } 1673 1674 /* 1675 * Invalidate inode pages in a worker thread. (This can't be done 1676 * in the message handler context.) 1677 */ 1678 static void ceph_invalidate_work(struct work_struct *work) 1679 { 1680 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1681 i_pg_inv_work); 1682 struct inode *inode = &ci->vfs_inode; 1683 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1684 u32 orig_gen; 1685 int check = 0; 1686 1687 mutex_lock(&ci->i_truncate_mutex); 1688 1689 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 1690 pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n", 1691 inode, ceph_ino(inode)); 1692 mapping_set_error(inode->i_mapping, -EIO); 1693 truncate_pagecache(inode, 0); 1694 mutex_unlock(&ci->i_truncate_mutex); 1695 goto out; 1696 } 1697 1698 spin_lock(&ci->i_ceph_lock); 1699 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1700 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1701 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 1702 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 1703 check = 1; 1704 spin_unlock(&ci->i_ceph_lock); 1705 mutex_unlock(&ci->i_truncate_mutex); 1706 goto out; 1707 } 1708 orig_gen = ci->i_rdcache_gen; 1709 spin_unlock(&ci->i_ceph_lock); 1710 1711 if (invalidate_inode_pages2(inode->i_mapping) < 0) { 1712 pr_err("invalidate_pages %p fails\n", inode); 1713 } 1714 1715 spin_lock(&ci->i_ceph_lock); 1716 if (orig_gen == ci->i_rdcache_gen && 1717 orig_gen == ci->i_rdcache_revoking) { 1718 dout("invalidate_pages %p gen %d successful\n", inode, 1719 ci->i_rdcache_gen); 1720 ci->i_rdcache_revoking--; 1721 check = 1; 1722 } else { 1723 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n", 1724 inode, orig_gen, ci->i_rdcache_gen, 1725 ci->i_rdcache_revoking); 1726 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 1727 check = 1; 1728 } 1729 spin_unlock(&ci->i_ceph_lock); 1730 mutex_unlock(&ci->i_truncate_mutex); 1731 out: 1732 if (check) 1733 ceph_check_caps(ci, 0, NULL); 1734 iput(inode); 1735 } 1736 1737 1738 /* 1739 * called by trunc_wq; 1740 * 1741 * We also truncate in a separate thread as well. 
1742 */ 1743 static void ceph_vmtruncate_work(struct work_struct *work) 1744 { 1745 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1746 i_vmtruncate_work); 1747 struct inode *inode = &ci->vfs_inode; 1748 1749 dout("vmtruncate_work %p\n", inode); 1750 __ceph_do_pending_vmtruncate(inode); 1751 iput(inode); 1752 } 1753 1754 /* 1755 * Queue an async vmtruncate. If we fail to queue work, we will handle 1756 * the truncation the next time we call __ceph_do_pending_vmtruncate. 1757 */ 1758 void ceph_queue_vmtruncate(struct inode *inode) 1759 { 1760 struct ceph_inode_info *ci = ceph_inode(inode); 1761 1762 ihold(inode); 1763 1764 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, 1765 &ci->i_vmtruncate_work)) { 1766 dout("ceph_queue_vmtruncate %p\n", inode); 1767 } else { 1768 dout("ceph_queue_vmtruncate %p failed, pending=%d\n", 1769 inode, ci->i_truncate_pending); 1770 iput(inode); 1771 } 1772 } 1773 1774 /* 1775 * Make sure any pending truncation is applied before doing anything 1776 * that may depend on it. 1777 */ 1778 void __ceph_do_pending_vmtruncate(struct inode *inode) 1779 { 1780 struct ceph_inode_info *ci = ceph_inode(inode); 1781 u64 to; 1782 int wrbuffer_refs, finish = 0; 1783 1784 mutex_lock(&ci->i_truncate_mutex); 1785 retry: 1786 spin_lock(&ci->i_ceph_lock); 1787 if (ci->i_truncate_pending == 0) { 1788 dout("__do_pending_vmtruncate %p none pending\n", inode); 1789 spin_unlock(&ci->i_ceph_lock); 1790 mutex_unlock(&ci->i_truncate_mutex); 1791 return; 1792 } 1793 1794 /* 1795 * make sure any dirty snapped pages are flushed before we 1796 * possibly truncate them.. so write AND block! 1797 */ 1798 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { 1799 dout("__do_pending_vmtruncate %p flushing snaps first\n", 1800 inode); 1801 spin_unlock(&ci->i_ceph_lock); 1802 filemap_write_and_wait_range(&inode->i_data, 0, 1803 inode->i_sb->s_maxbytes); 1804 goto retry; 1805 } 1806 1807 /* there should be no reader or writer */ 1808 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref); 1809 1810 to = ci->i_truncate_size; 1811 wrbuffer_refs = ci->i_wrbuffer_ref; 1812 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode, 1813 ci->i_truncate_pending, to); 1814 spin_unlock(&ci->i_ceph_lock); 1815 1816 truncate_pagecache(inode, to); 1817 1818 spin_lock(&ci->i_ceph_lock); 1819 if (to == ci->i_truncate_size) { 1820 ci->i_truncate_pending = 0; 1821 finish = 1; 1822 } 1823 spin_unlock(&ci->i_ceph_lock); 1824 if (!finish) 1825 goto retry; 1826 1827 mutex_unlock(&ci->i_truncate_mutex); 1828 1829 if (wrbuffer_refs == 0) 1830 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 1831 1832 wake_up_all(&ci->i_cap_wq); 1833 } 1834 1835 /* 1836 * symlinks 1837 */ 1838 static const struct inode_operations ceph_symlink_iops = { 1839 .get_link = simple_get_link, 1840 .setattr = ceph_setattr, 1841 .getattr = ceph_getattr, 1842 .listxattr = ceph_listxattr, 1843 }; 1844 1845 int __ceph_setattr(struct inode *inode, struct iattr *attr) 1846 { 1847 struct ceph_inode_info *ci = ceph_inode(inode); 1848 const unsigned int ia_valid = attr->ia_valid; 1849 struct ceph_mds_request *req; 1850 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 1851 struct ceph_cap_flush *prealloc_cf; 1852 int issued; 1853 int release = 0, dirtied = 0; 1854 int mask = 0; 1855 int err = 0; 1856 int inode_dirty_flags = 0; 1857 bool lock_snap_rwsem = false; 1858 1859 prealloc_cf = ceph_alloc_cap_flush(); 1860 if (!prealloc_cf) 1861 return -ENOMEM; 1862 1863 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR, 
int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks = calc_inode_blocks(attr->ia_size);
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = current_time(inode);
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, attr->ia_mode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	ceph_free_cap_flush(prealloc_cf);
	return err;
out_put:
	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);
	return err;
}
/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int err;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        err = setattr_prepare(dentry, attr);
        if (err != 0)
                return err;

        return __ceph_setattr(inode, attr);
}
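/*
 * Illustrative sketch, not part of the original file: the snapshot
 * guard used by ceph_setattr() above.  Any inode whose vino.snap is
 * not CEPH_NOSNAP belongs to a snapshot and is immutable, so every
 * mutating entry point rejects it with -EROFS up front.  The helper
 * name is hypothetical.
 */
static inline int example_reject_snapshot_write(struct inode *inode)
{
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;          /* snapshots are read-only */
        return 0;
}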
hrm!\n", inode); 2023 2024 if (dirtied) { 2025 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, 2026 &prealloc_cf); 2027 inode->i_ctime = current_time(inode); 2028 } 2029 2030 release &= issued; 2031 spin_unlock(&ci->i_ceph_lock); 2032 if (lock_snap_rwsem) 2033 up_read(&mdsc->snap_rwsem); 2034 2035 if (inode_dirty_flags) 2036 __mark_inode_dirty(inode, inode_dirty_flags); 2037 2038 if (ia_valid & ATTR_MODE) { 2039 err = posix_acl_chmod(inode, attr->ia_mode); 2040 if (err) 2041 goto out_put; 2042 } 2043 2044 if (mask) { 2045 req->r_inode = inode; 2046 ihold(inode); 2047 req->r_inode_drop = release; 2048 req->r_args.setattr.mask = cpu_to_le32(mask); 2049 req->r_num_caps = 1; 2050 err = ceph_mdsc_do_request(mdsc, NULL, req); 2051 } 2052 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 2053 ceph_cap_string(dirtied), mask); 2054 2055 ceph_mdsc_put_request(req); 2056 if (mask & CEPH_SETATTR_SIZE) 2057 __ceph_do_pending_vmtruncate(inode); 2058 ceph_free_cap_flush(prealloc_cf); 2059 return err; 2060 out_put: 2061 ceph_mdsc_put_request(req); 2062 ceph_free_cap_flush(prealloc_cf); 2063 return err; 2064 } 2065 2066 /* 2067 * setattr 2068 */ 2069 int ceph_setattr(struct dentry *dentry, struct iattr *attr) 2070 { 2071 struct inode *inode = d_inode(dentry); 2072 int err; 2073 2074 if (ceph_snap(inode) != CEPH_NOSNAP) 2075 return -EROFS; 2076 2077 err = setattr_prepare(dentry, attr); 2078 if (err != 0) 2079 return err; 2080 2081 return __ceph_setattr(inode, attr); 2082 } 2083 2084 /* 2085 * Verify that we have a lease on the given mask. If not, 2086 * do a getattr against an mds. 2087 */ 2088 int __ceph_do_getattr(struct inode *inode, struct page *locked_page, 2089 int mask, bool force) 2090 { 2091 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 2092 struct ceph_mds_client *mdsc = fsc->mdsc; 2093 struct ceph_mds_request *req; 2094 int err; 2095 2096 if (ceph_snap(inode) == CEPH_SNAPDIR) { 2097 dout("do_getattr inode %p SNAPDIR\n", inode); 2098 return 0; 2099 } 2100 2101 dout("do_getattr inode %p mask %s mode 0%o\n", 2102 inode, ceph_cap_string(mask), inode->i_mode); 2103 if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 2104 return 0; 2105 2106 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 2107 if (IS_ERR(req)) 2108 return PTR_ERR(req); 2109 req->r_inode = inode; 2110 ihold(inode); 2111 req->r_num_caps = 1; 2112 req->r_args.getattr.mask = cpu_to_le32(mask); 2113 req->r_locked_page = locked_page; 2114 err = ceph_mdsc_do_request(mdsc, NULL, req); 2115 if (locked_page && err == 0) { 2116 u64 inline_version = req->r_reply_info.targeti.inline_version; 2117 if (inline_version == 0) { 2118 /* the reply is supposed to contain inline data */ 2119 err = -EINVAL; 2120 } else if (inline_version == CEPH_INLINE_NONE) { 2121 err = -ENODATA; 2122 } else { 2123 err = req->r_reply_info.targeti.inline_len; 2124 } 2125 } 2126 ceph_mdsc_put_request(req); 2127 dout("do_getattr result=%d\n", err); 2128 return err; 2129 } 2130 2131 2132 /* 2133 * Check inode permissions. We verify we have a valid value for 2134 * the AUTH cap, then call the generic handler. 2135 */ 2136 int ceph_permission(struct inode *inode, int mask) 2137 { 2138 int err; 2139 2140 if (mask & MAY_NOT_BLOCK) 2141 return -ECHILD; 2142 2143 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false); 2144 2145 if (!err) 2146 err = generic_permission(inode, mask); 2147 return err; 2148 } 2149 2150 /* 2151 * Get all attributes. 
/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
        int err;

        if (mask & MAY_NOT_BLOCK)
                return -ECHILD;

        err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

        if (!err)
                err = generic_permission(inode, mask);
        return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
                 struct kstat *stat)
{
        struct inode *inode = d_inode(dentry);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err;

        err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false);
        if (!err) {
                generic_fillattr(inode, stat);
                stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
                if (ceph_snap(inode) != CEPH_NOSNAP)
                        stat->dev = ceph_snap(inode);
                else
                        stat->dev = 0;
                if (S_ISDIR(inode->i_mode)) {
                        if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
                                                RBYTES))
                                stat->size = ci->i_rbytes;
                        else
                                stat->size = ci->i_files + ci->i_subdirs;
                        stat->blocks = 0;
                        stat->blksize = 65536;
                }
        }
        return err;
}
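/*
 * Illustrative note, not part of the original file: what the directory
 * branch of ceph_getattr() above means for applications.  With the
 * 'rbytes' mount option, stat() on a directory reports st_size as the
 * recursive byte count of everything beneath it; without it, st_size
 * is the number of directory entries (files + subdirectories).  A
 * userspace check might look like (the path is hypothetical):
 *
 *	struct stat st;
 *
 *	if (stat("/mnt/cephfs/some/dir", &st) == 0)
 *		printf("dir size: %lld\n", (long long)st.st_size);
 */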