1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/module.h> 4 #include <linux/fs.h> 5 #include <linux/slab.h> 6 #include <linux/string.h> 7 #include <linux/uaccess.h> 8 #include <linux/kernel.h> 9 #include <linux/namei.h> 10 #include <linux/writeback.h> 11 #include <linux/vmalloc.h> 12 #include <linux/pagevec.h> 13 14 #include "super.h" 15 #include "mds_client.h" 16 #include <linux/ceph/decode.h> 17 18 /* 19 * Ceph inode operations 20 * 21 * Implement basic inode helpers (get, alloc) and inode ops (getattr, 22 * setattr, etc.), xattr helpers, and helpers for assimilating 23 * metadata returned by the MDS into our cache. 24 * 25 * Also define helpers for doing asynchronous writeback, invalidation, 26 * and truncation for the benefit of those who can't afford to block 27 * (typically because they are in the message handler path). 28 */ 29 30 static const struct inode_operations ceph_symlink_iops; 31 32 static void ceph_invalidate_work(struct work_struct *work); 33 static void ceph_writeback_work(struct work_struct *work); 34 static void ceph_vmtruncate_work(struct work_struct *work); 35 36 /* 37 * find or create an inode, given the ceph ino number 38 */ 39 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) 40 { 41 struct inode *inode; 42 ino_t t = ceph_vino_to_ino(vino); 43 44 inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino); 45 if (inode == NULL) 46 return ERR_PTR(-ENOMEM); 47 if (inode->i_state & I_NEW) { 48 dout("get_inode created new inode %p %llx.%llx ino %llx\n", 49 inode, ceph_vinop(inode), (u64)inode->i_ino); 50 unlock_new_inode(inode); 51 } 52 53 dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino, 54 vino.snap, inode); 55 return inode; 56 } 57 58 /* 59 * get/constuct snapdir inode for a given directory 60 */ 61 struct inode *ceph_get_snapdir(struct inode *parent) 62 { 63 struct ceph_vino vino = { 64 .ino = ceph_ino(parent), 65 .snap = CEPH_SNAPDIR, 66 }; 67 struct inode *inode = ceph_get_inode(parent->i_sb, vino); 68 struct ceph_inode_info *ci = ceph_inode(inode); 69 70 BUG_ON(!S_ISDIR(parent->i_mode)); 71 if (IS_ERR(inode)) 72 return inode; 73 inode->i_mode = parent->i_mode; 74 inode->i_uid = parent->i_uid; 75 inode->i_gid = parent->i_gid; 76 inode->i_op = &ceph_dir_iops; 77 inode->i_fop = &ceph_dir_fops; 78 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ 79 ci->i_rbytes = 0; 80 return inode; 81 } 82 83 const struct inode_operations ceph_file_iops = { 84 .permission = ceph_permission, 85 .setattr = ceph_setattr, 86 .getattr = ceph_getattr, 87 .setxattr = ceph_setxattr, 88 .getxattr = ceph_getxattr, 89 .listxattr = ceph_listxattr, 90 .removexattr = ceph_removexattr, 91 }; 92 93 94 /* 95 * We use a 'frag tree' to keep track of the MDS's directory fragments 96 * for a given inode (usually there is just a single fragment). We 97 * need to know when a child frag is delegated to a new MDS, or when 98 * it is flagged as replicated, so we can direct our requests 99 * accordingly. 100 */ 101 102 /* 103 * find/create a frag in the tree 104 */ 105 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, 106 u32 f) 107 { 108 struct rb_node **p; 109 struct rb_node *parent = NULL; 110 struct ceph_inode_frag *frag; 111 int c; 112 113 p = &ci->i_fragtree.rb_node; 114 while (*p) { 115 parent = *p; 116 frag = rb_entry(parent, struct ceph_inode_frag, node); 117 c = ceph_frag_compare(f, frag->frag); 118 if (c < 0) 119 p = &(*p)->rb_left; 120 else if (c > 0) 121 p = &(*p)->rb_right; 122 else 123 return frag; 124 } 125 126 frag = kmalloc(sizeof(*frag), GFP_NOFS); 127 if (!frag) { 128 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx " 129 "frag %x\n", &ci->vfs_inode, 130 ceph_vinop(&ci->vfs_inode), f); 131 return ERR_PTR(-ENOMEM); 132 } 133 frag->frag = f; 134 frag->split_by = 0; 135 frag->mds = -1; 136 frag->ndist = 0; 137 138 rb_link_node(&frag->node, parent, p); 139 rb_insert_color(&frag->node, &ci->i_fragtree); 140 141 dout("get_or_create_frag added %llx.%llx frag %x\n", 142 ceph_vinop(&ci->vfs_inode), f); 143 return frag; 144 } 145 146 /* 147 * find a specific frag @f 148 */ 149 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f) 150 { 151 struct rb_node *n = ci->i_fragtree.rb_node; 152 153 while (n) { 154 struct ceph_inode_frag *frag = 155 rb_entry(n, struct ceph_inode_frag, node); 156 int c = ceph_frag_compare(f, frag->frag); 157 if (c < 0) 158 n = n->rb_left; 159 else if (c > 0) 160 n = n->rb_right; 161 else 162 return frag; 163 } 164 return NULL; 165 } 166 167 /* 168 * Choose frag containing the given value @v. If @pfrag is 169 * specified, copy the frag delegation info to the caller if 170 * it is present. 171 */ 172 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, 173 struct ceph_inode_frag *pfrag, 174 int *found) 175 { 176 u32 t = ceph_frag_make(0, 0); 177 struct ceph_inode_frag *frag; 178 unsigned nway, i; 179 u32 n; 180 181 if (found) 182 *found = 0; 183 184 mutex_lock(&ci->i_fragtree_mutex); 185 while (1) { 186 WARN_ON(!ceph_frag_contains_value(t, v)); 187 frag = __ceph_find_frag(ci, t); 188 if (!frag) 189 break; /* t is a leaf */ 190 if (frag->split_by == 0) { 191 if (pfrag) 192 memcpy(pfrag, frag, sizeof(*pfrag)); 193 if (found) 194 *found = 1; 195 break; 196 } 197 198 /* choose child */ 199 nway = 1 << frag->split_by; 200 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t, 201 frag->split_by, nway); 202 for (i = 0; i < nway; i++) { 203 n = ceph_frag_make_child(t, frag->split_by, i); 204 if (ceph_frag_contains_value(n, v)) { 205 t = n; 206 break; 207 } 208 } 209 BUG_ON(i == nway); 210 } 211 dout("choose_frag(%x) = %x\n", v, t); 212 213 mutex_unlock(&ci->i_fragtree_mutex); 214 return t; 215 } 216 217 /* 218 * Process dirfrag (delegation) info from the mds. Include leaf 219 * fragment in tree ONLY if ndist > 0. Otherwise, only 220 * branches/splits are included in i_fragtree) 221 */ 222 static int ceph_fill_dirfrag(struct inode *inode, 223 struct ceph_mds_reply_dirfrag *dirinfo) 224 { 225 struct ceph_inode_info *ci = ceph_inode(inode); 226 struct ceph_inode_frag *frag; 227 u32 id = le32_to_cpu(dirinfo->frag); 228 int mds = le32_to_cpu(dirinfo->auth); 229 int ndist = le32_to_cpu(dirinfo->ndist); 230 int i; 231 int err = 0; 232 233 mutex_lock(&ci->i_fragtree_mutex); 234 if (ndist == 0) { 235 /* no delegation info needed. */ 236 frag = __ceph_find_frag(ci, id); 237 if (!frag) 238 goto out; 239 if (frag->split_by == 0) { 240 /* tree leaf, remove */ 241 dout("fill_dirfrag removed %llx.%llx frag %x" 242 " (no ref)\n", ceph_vinop(inode), id); 243 rb_erase(&frag->node, &ci->i_fragtree); 244 kfree(frag); 245 } else { 246 /* tree branch, keep and clear */ 247 dout("fill_dirfrag cleared %llx.%llx frag %x" 248 " referral\n", ceph_vinop(inode), id); 249 frag->mds = -1; 250 frag->ndist = 0; 251 } 252 goto out; 253 } 254 255 256 /* find/add this frag to store mds delegation info */ 257 frag = __get_or_create_frag(ci, id); 258 if (IS_ERR(frag)) { 259 /* this is not the end of the world; we can continue 260 with bad/inaccurate delegation info */ 261 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n", 262 ceph_vinop(inode), le32_to_cpu(dirinfo->frag)); 263 err = -ENOMEM; 264 goto out; 265 } 266 267 frag->mds = mds; 268 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP); 269 for (i = 0; i < frag->ndist; i++) 270 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]); 271 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n", 272 ceph_vinop(inode), frag->frag, frag->ndist); 273 274 out: 275 mutex_unlock(&ci->i_fragtree_mutex); 276 return err; 277 } 278 279 280 /* 281 * initialize a newly allocated inode. 282 */ 283 struct inode *ceph_alloc_inode(struct super_block *sb) 284 { 285 struct ceph_inode_info *ci; 286 int i; 287 288 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS); 289 if (!ci) 290 return NULL; 291 292 dout("alloc_inode %p\n", &ci->vfs_inode); 293 294 ci->i_version = 0; 295 ci->i_time_warp_seq = 0; 296 ci->i_ceph_flags = 0; 297 ci->i_release_count = 0; 298 ci->i_symlink = NULL; 299 300 ci->i_fragtree = RB_ROOT; 301 mutex_init(&ci->i_fragtree_mutex); 302 303 ci->i_xattrs.blob = NULL; 304 ci->i_xattrs.prealloc_blob = NULL; 305 ci->i_xattrs.dirty = false; 306 ci->i_xattrs.index = RB_ROOT; 307 ci->i_xattrs.count = 0; 308 ci->i_xattrs.names_size = 0; 309 ci->i_xattrs.vals_size = 0; 310 ci->i_xattrs.version = 0; 311 ci->i_xattrs.index_version = 0; 312 313 ci->i_caps = RB_ROOT; 314 ci->i_auth_cap = NULL; 315 ci->i_dirty_caps = 0; 316 ci->i_flushing_caps = 0; 317 INIT_LIST_HEAD(&ci->i_dirty_item); 318 INIT_LIST_HEAD(&ci->i_flushing_item); 319 ci->i_cap_flush_seq = 0; 320 ci->i_cap_flush_last_tid = 0; 321 memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid)); 322 init_waitqueue_head(&ci->i_cap_wq); 323 ci->i_hold_caps_min = 0; 324 ci->i_hold_caps_max = 0; 325 INIT_LIST_HEAD(&ci->i_cap_delay_list); 326 ci->i_cap_exporting_mds = 0; 327 ci->i_cap_exporting_mseq = 0; 328 ci->i_cap_exporting_issued = 0; 329 INIT_LIST_HEAD(&ci->i_cap_snaps); 330 ci->i_head_snapc = NULL; 331 ci->i_snap_caps = 0; 332 333 for (i = 0; i < CEPH_FILE_MODE_NUM; i++) 334 ci->i_nr_by_mode[i] = 0; 335 336 ci->i_truncate_seq = 0; 337 ci->i_truncate_size = 0; 338 ci->i_truncate_pending = 0; 339 340 ci->i_max_size = 0; 341 ci->i_reported_size = 0; 342 ci->i_wanted_max_size = 0; 343 ci->i_requested_max_size = 0; 344 345 ci->i_pin_ref = 0; 346 ci->i_rd_ref = 0; 347 ci->i_rdcache_ref = 0; 348 ci->i_wr_ref = 0; 349 ci->i_wrbuffer_ref = 0; 350 ci->i_wrbuffer_ref_head = 0; 351 ci->i_shared_gen = 0; 352 ci->i_rdcache_gen = 0; 353 ci->i_rdcache_revoking = 0; 354 355 INIT_LIST_HEAD(&ci->i_unsafe_writes); 356 INIT_LIST_HEAD(&ci->i_unsafe_dirops); 357 spin_lock_init(&ci->i_unsafe_lock); 358 359 ci->i_snap_realm = NULL; 360 INIT_LIST_HEAD(&ci->i_snap_realm_item); 361 INIT_LIST_HEAD(&ci->i_snap_flush_item); 362 363 INIT_WORK(&ci->i_wb_work, ceph_writeback_work); 364 INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work); 365 366 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); 367 368 return &ci->vfs_inode; 369 } 370 371 void ceph_destroy_inode(struct inode *inode) 372 { 373 struct ceph_inode_info *ci = ceph_inode(inode); 374 struct ceph_inode_frag *frag; 375 struct rb_node *n; 376 377 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode)); 378 379 ceph_queue_caps_release(inode); 380 381 /* 382 * we may still have a snap_realm reference if there are stray 383 * caps in i_cap_exporting_issued or i_snap_caps. 384 */ 385 if (ci->i_snap_realm) { 386 struct ceph_mds_client *mdsc = 387 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; 388 struct ceph_snap_realm *realm = ci->i_snap_realm; 389 390 dout(" dropping residual ref to snap realm %p\n", realm); 391 spin_lock(&realm->inodes_with_caps_lock); 392 list_del_init(&ci->i_snap_realm_item); 393 spin_unlock(&realm->inodes_with_caps_lock); 394 ceph_put_snap_realm(mdsc, realm); 395 } 396 397 kfree(ci->i_symlink); 398 while ((n = rb_first(&ci->i_fragtree)) != NULL) { 399 frag = rb_entry(n, struct ceph_inode_frag, node); 400 rb_erase(n, &ci->i_fragtree); 401 kfree(frag); 402 } 403 404 __ceph_destroy_xattrs(ci); 405 if (ci->i_xattrs.blob) 406 ceph_buffer_put(ci->i_xattrs.blob); 407 if (ci->i_xattrs.prealloc_blob) 408 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 409 410 kmem_cache_free(ceph_inode_cachep, ci); 411 } 412 413 414 /* 415 * Helpers to fill in size, ctime, mtime, and atime. We have to be 416 * careful because either the client or MDS may have more up to date 417 * info, depending on which capabilities are held, and whether 418 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime 419 * and size are monotonically increasing, except when utimes() or 420 * truncate() increments the corresponding _seq values.) 421 */ 422 int ceph_fill_file_size(struct inode *inode, int issued, 423 u32 truncate_seq, u64 truncate_size, u64 size) 424 { 425 struct ceph_inode_info *ci = ceph_inode(inode); 426 int queue_trunc = 0; 427 428 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || 429 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { 430 dout("size %lld -> %llu\n", inode->i_size, size); 431 inode->i_size = size; 432 inode->i_blocks = (size + (1<<9) - 1) >> 9; 433 ci->i_reported_size = size; 434 if (truncate_seq != ci->i_truncate_seq) { 435 dout("truncate_seq %u -> %u\n", 436 ci->i_truncate_seq, truncate_seq); 437 ci->i_truncate_seq = truncate_seq; 438 /* 439 * If we hold relevant caps, or in the case where we're 440 * not the only client referencing this file and we 441 * don't hold those caps, then we need to check whether 442 * the file is either opened or mmaped 443 */ 444 if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD| 445 CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER| 446 CEPH_CAP_FILE_EXCL| 447 CEPH_CAP_FILE_LAZYIO)) || 448 mapping_mapped(inode->i_mapping) || 449 __ceph_caps_file_wanted(ci)) { 450 ci->i_truncate_pending++; 451 queue_trunc = 1; 452 } 453 } 454 } 455 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 && 456 ci->i_truncate_size != truncate_size) { 457 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size, 458 truncate_size); 459 ci->i_truncate_size = truncate_size; 460 } 461 return queue_trunc; 462 } 463 464 void ceph_fill_file_time(struct inode *inode, int issued, 465 u64 time_warp_seq, struct timespec *ctime, 466 struct timespec *mtime, struct timespec *atime) 467 { 468 struct ceph_inode_info *ci = ceph_inode(inode); 469 int warn = 0; 470 471 if (issued & (CEPH_CAP_FILE_EXCL| 472 CEPH_CAP_FILE_WR| 473 CEPH_CAP_FILE_BUFFER| 474 CEPH_CAP_AUTH_EXCL| 475 CEPH_CAP_XATTR_EXCL)) { 476 if (timespec_compare(ctime, &inode->i_ctime) > 0) { 477 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", 478 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 479 ctime->tv_sec, ctime->tv_nsec); 480 inode->i_ctime = *ctime; 481 } 482 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 483 /* the MDS did a utimes() */ 484 dout("mtime %ld.%09ld -> %ld.%09ld " 485 "tw %d -> %d\n", 486 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 487 mtime->tv_sec, mtime->tv_nsec, 488 ci->i_time_warp_seq, (int)time_warp_seq); 489 490 inode->i_mtime = *mtime; 491 inode->i_atime = *atime; 492 ci->i_time_warp_seq = time_warp_seq; 493 } else if (time_warp_seq == ci->i_time_warp_seq) { 494 /* nobody did utimes(); take the max */ 495 if (timespec_compare(mtime, &inode->i_mtime) > 0) { 496 dout("mtime %ld.%09ld -> %ld.%09ld inc\n", 497 inode->i_mtime.tv_sec, 498 inode->i_mtime.tv_nsec, 499 mtime->tv_sec, mtime->tv_nsec); 500 inode->i_mtime = *mtime; 501 } 502 if (timespec_compare(atime, &inode->i_atime) > 0) { 503 dout("atime %ld.%09ld -> %ld.%09ld inc\n", 504 inode->i_atime.tv_sec, 505 inode->i_atime.tv_nsec, 506 atime->tv_sec, atime->tv_nsec); 507 inode->i_atime = *atime; 508 } 509 } else if (issued & CEPH_CAP_FILE_EXCL) { 510 /* we did a utimes(); ignore mds values */ 511 } else { 512 warn = 1; 513 } 514 } else { 515 /* we have no write|excl caps; whatever the MDS says is true */ 516 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { 517 inode->i_ctime = *ctime; 518 inode->i_mtime = *mtime; 519 inode->i_atime = *atime; 520 ci->i_time_warp_seq = time_warp_seq; 521 } else { 522 warn = 1; 523 } 524 } 525 if (warn) /* time_warp_seq shouldn't go backwards */ 526 dout("%p mds time_warp_seq %llu < %u\n", 527 inode, time_warp_seq, ci->i_time_warp_seq); 528 } 529 530 /* 531 * Populate an inode based on info from mds. May be called on new or 532 * existing inodes. 533 */ 534 static int fill_inode(struct inode *inode, 535 struct ceph_mds_reply_info_in *iinfo, 536 struct ceph_mds_reply_dirfrag *dirinfo, 537 struct ceph_mds_session *session, 538 unsigned long ttl_from, int cap_fmode, 539 struct ceph_cap_reservation *caps_reservation) 540 { 541 struct ceph_mds_reply_inode *info = iinfo->in; 542 struct ceph_inode_info *ci = ceph_inode(inode); 543 int i; 544 int issued, implemented; 545 struct timespec mtime, atime, ctime; 546 u32 nsplits; 547 struct ceph_buffer *xattr_blob = NULL; 548 int err = 0; 549 int queue_trunc = 0; 550 551 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n", 552 inode, ceph_vinop(inode), le64_to_cpu(info->version), 553 ci->i_version); 554 555 /* 556 * prealloc xattr data, if it looks like we'll need it. only 557 * if len > 4 (meaning there are actually xattrs; the first 4 558 * bytes are the xattr count). 559 */ 560 if (iinfo->xattr_len > 4) { 561 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS); 562 if (!xattr_blob) 563 pr_err("fill_inode ENOMEM xattr blob %d bytes\n", 564 iinfo->xattr_len); 565 } 566 567 spin_lock(&inode->i_lock); 568 569 /* 570 * provided version will be odd if inode value is projected, 571 * even if stable. skip the update if we have newer stable 572 * info (ours>=theirs, e.g. due to racing mds replies), unless 573 * we are getting projected (unstable) info (in which case the 574 * version is odd, and we want ours>theirs). 575 * us them 576 * 2 2 skip 577 * 3 2 skip 578 * 3 3 update 579 */ 580 if (le64_to_cpu(info->version) > 0 && 581 (ci->i_version & ~1) >= le64_to_cpu(info->version)) 582 goto no_change; 583 584 issued = __ceph_caps_issued(ci, &implemented); 585 issued |= implemented | __ceph_caps_dirty(ci); 586 587 /* update inode */ 588 ci->i_version = le64_to_cpu(info->version); 589 inode->i_version++; 590 inode->i_rdev = le32_to_cpu(info->rdev); 591 592 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) { 593 inode->i_mode = le32_to_cpu(info->mode); 594 inode->i_uid = le32_to_cpu(info->uid); 595 inode->i_gid = le32_to_cpu(info->gid); 596 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 597 inode->i_uid, inode->i_gid); 598 } 599 600 if ((issued & CEPH_CAP_LINK_EXCL) == 0) 601 inode->i_nlink = le32_to_cpu(info->nlink); 602 603 /* be careful with mtime, atime, size */ 604 ceph_decode_timespec(&atime, &info->atime); 605 ceph_decode_timespec(&mtime, &info->mtime); 606 ceph_decode_timespec(&ctime, &info->ctime); 607 queue_trunc = ceph_fill_file_size(inode, issued, 608 le32_to_cpu(info->truncate_seq), 609 le64_to_cpu(info->truncate_size), 610 le64_to_cpu(info->size)); 611 ceph_fill_file_time(inode, issued, 612 le32_to_cpu(info->time_warp_seq), 613 &ctime, &mtime, &atime); 614 615 /* only update max_size on auth cap */ 616 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) && 617 ci->i_max_size != le64_to_cpu(info->max_size)) { 618 dout("max_size %lld -> %llu\n", ci->i_max_size, 619 le64_to_cpu(info->max_size)); 620 ci->i_max_size = le64_to_cpu(info->max_size); 621 } 622 623 ci->i_layout = info->layout; 624 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 625 626 /* xattrs */ 627 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */ 628 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && 629 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { 630 if (ci->i_xattrs.blob) 631 ceph_buffer_put(ci->i_xattrs.blob); 632 ci->i_xattrs.blob = xattr_blob; 633 if (xattr_blob) 634 memcpy(ci->i_xattrs.blob->vec.iov_base, 635 iinfo->xattr_data, iinfo->xattr_len); 636 ci->i_xattrs.version = le64_to_cpu(info->xattr_version); 637 xattr_blob = NULL; 638 } 639 640 inode->i_mapping->a_ops = &ceph_aops; 641 inode->i_mapping->backing_dev_info = 642 &ceph_sb_to_client(inode->i_sb)->backing_dev_info; 643 644 switch (inode->i_mode & S_IFMT) { 645 case S_IFIFO: 646 case S_IFBLK: 647 case S_IFCHR: 648 case S_IFSOCK: 649 init_special_inode(inode, inode->i_mode, inode->i_rdev); 650 inode->i_op = &ceph_file_iops; 651 break; 652 case S_IFREG: 653 inode->i_op = &ceph_file_iops; 654 inode->i_fop = &ceph_file_fops; 655 break; 656 case S_IFLNK: 657 inode->i_op = &ceph_symlink_iops; 658 if (!ci->i_symlink) { 659 int symlen = iinfo->symlink_len; 660 char *sym; 661 662 BUG_ON(symlen != inode->i_size); 663 spin_unlock(&inode->i_lock); 664 665 err = -ENOMEM; 666 sym = kmalloc(symlen+1, GFP_NOFS); 667 if (!sym) 668 goto out; 669 memcpy(sym, iinfo->symlink, symlen); 670 sym[symlen] = 0; 671 672 spin_lock(&inode->i_lock); 673 if (!ci->i_symlink) 674 ci->i_symlink = sym; 675 else 676 kfree(sym); /* lost a race */ 677 } 678 break; 679 case S_IFDIR: 680 inode->i_op = &ceph_dir_iops; 681 inode->i_fop = &ceph_dir_fops; 682 683 ci->i_files = le64_to_cpu(info->files); 684 ci->i_subdirs = le64_to_cpu(info->subdirs); 685 ci->i_rbytes = le64_to_cpu(info->rbytes); 686 ci->i_rfiles = le64_to_cpu(info->rfiles); 687 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs); 688 ceph_decode_timespec(&ci->i_rctime, &info->rctime); 689 690 /* set dir completion flag? */ 691 if (ci->i_files == 0 && ci->i_subdirs == 0 && 692 ceph_snap(inode) == CEPH_NOSNAP && 693 (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) && 694 (issued & CEPH_CAP_FILE_EXCL) == 0 && 695 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { 696 dout(" marking %p complete (empty)\n", inode); 697 ci->i_ceph_flags |= CEPH_I_COMPLETE; 698 ci->i_max_offset = 2; 699 } 700 701 /* it may be better to set st_size in getattr instead? */ 702 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES)) 703 inode->i_size = ci->i_rbytes; 704 break; 705 default: 706 pr_err("fill_inode %llx.%llx BAD mode 0%o\n", 707 ceph_vinop(inode), inode->i_mode); 708 } 709 710 no_change: 711 spin_unlock(&inode->i_lock); 712 713 /* queue truncate if we saw i_size decrease */ 714 if (queue_trunc) 715 ceph_queue_vmtruncate(inode); 716 717 /* populate frag tree */ 718 /* FIXME: move me up, if/when version reflects fragtree changes */ 719 nsplits = le32_to_cpu(info->fragtree.nsplits); 720 mutex_lock(&ci->i_fragtree_mutex); 721 for (i = 0; i < nsplits; i++) { 722 u32 id = le32_to_cpu(info->fragtree.splits[i].frag); 723 struct ceph_inode_frag *frag = __get_or_create_frag(ci, id); 724 725 if (IS_ERR(frag)) 726 continue; 727 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by); 728 dout(" frag %x split by %d\n", frag->frag, frag->split_by); 729 } 730 mutex_unlock(&ci->i_fragtree_mutex); 731 732 /* were we issued a capability? */ 733 if (info->cap.caps) { 734 if (ceph_snap(inode) == CEPH_NOSNAP) { 735 ceph_add_cap(inode, session, 736 le64_to_cpu(info->cap.cap_id), 737 cap_fmode, 738 le32_to_cpu(info->cap.caps), 739 le32_to_cpu(info->cap.wanted), 740 le32_to_cpu(info->cap.seq), 741 le32_to_cpu(info->cap.mseq), 742 le64_to_cpu(info->cap.realm), 743 info->cap.flags, 744 caps_reservation); 745 } else { 746 spin_lock(&inode->i_lock); 747 dout(" %p got snap_caps %s\n", inode, 748 ceph_cap_string(le32_to_cpu(info->cap.caps))); 749 ci->i_snap_caps |= le32_to_cpu(info->cap.caps); 750 if (cap_fmode >= 0) 751 __ceph_get_fmode(ci, cap_fmode); 752 spin_unlock(&inode->i_lock); 753 } 754 } else if (cap_fmode >= 0) { 755 pr_warning("mds issued no caps on %llx.%llx\n", 756 ceph_vinop(inode)); 757 __ceph_get_fmode(ci, cap_fmode); 758 } 759 760 /* update delegation info? */ 761 if (dirinfo) 762 ceph_fill_dirfrag(inode, dirinfo); 763 764 err = 0; 765 766 out: 767 if (xattr_blob) 768 ceph_buffer_put(xattr_blob); 769 return err; 770 } 771 772 /* 773 * caller should hold session s_mutex. 774 */ 775 static void update_dentry_lease(struct dentry *dentry, 776 struct ceph_mds_reply_lease *lease, 777 struct ceph_mds_session *session, 778 unsigned long from_time) 779 { 780 struct ceph_dentry_info *di = ceph_dentry(dentry); 781 long unsigned duration = le32_to_cpu(lease->duration_ms); 782 long unsigned ttl = from_time + (duration * HZ) / 1000; 783 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000; 784 struct inode *dir; 785 786 /* only track leases on regular dentries */ 787 if (dentry->d_op != &ceph_dentry_ops) 788 return; 789 790 spin_lock(&dentry->d_lock); 791 dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n", 792 dentry, le16_to_cpu(lease->mask), duration, ttl); 793 794 /* make lease_rdcache_gen match directory */ 795 dir = dentry->d_parent->d_inode; 796 di->lease_shared_gen = ceph_inode(dir)->i_shared_gen; 797 798 if (lease->mask == 0) 799 goto out_unlock; 800 801 if (di->lease_gen == session->s_cap_gen && 802 time_before(ttl, dentry->d_time)) 803 goto out_unlock; /* we already have a newer lease. */ 804 805 if (di->lease_session && di->lease_session != session) 806 goto out_unlock; 807 808 ceph_dentry_lru_touch(dentry); 809 810 if (!di->lease_session) 811 di->lease_session = ceph_get_mds_session(session); 812 di->lease_gen = session->s_cap_gen; 813 di->lease_seq = le32_to_cpu(lease->seq); 814 di->lease_renew_after = half_ttl; 815 di->lease_renew_from = 0; 816 dentry->d_time = ttl; 817 out_unlock: 818 spin_unlock(&dentry->d_lock); 819 return; 820 } 821 822 /* 823 * Set dentry's directory position based on the current dir's max, and 824 * order it in d_subdirs, so that dcache_readdir behaves. 825 */ 826 static void ceph_set_dentry_offset(struct dentry *dn) 827 { 828 struct dentry *dir = dn->d_parent; 829 struct inode *inode = dn->d_parent->d_inode; 830 struct ceph_dentry_info *di; 831 832 BUG_ON(!inode); 833 834 di = ceph_dentry(dn); 835 836 spin_lock(&inode->i_lock); 837 if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) { 838 spin_unlock(&inode->i_lock); 839 return; 840 } 841 di->offset = ceph_inode(inode)->i_max_offset++; 842 spin_unlock(&inode->i_lock); 843 844 spin_lock(&dcache_lock); 845 spin_lock(&dir->d_lock); 846 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); 847 list_move(&dn->d_u.d_child, &dir->d_subdirs); 848 dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset, 849 dn->d_u.d_child.prev, dn->d_u.d_child.next); 850 spin_unlock(&dn->d_lock); 851 spin_unlock(&dir->d_lock); 852 spin_unlock(&dcache_lock); 853 } 854 855 /* 856 * splice a dentry to an inode. 857 * caller must hold directory i_mutex for this to be safe. 858 * 859 * we will only rehash the resulting dentry if @prehash is 860 * true; @prehash will be set to false (for the benefit of 861 * the caller) if we fail. 862 */ 863 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, 864 bool *prehash, bool set_offset) 865 { 866 struct dentry *realdn; 867 868 BUG_ON(dn->d_inode); 869 870 /* dn must be unhashed */ 871 if (!d_unhashed(dn)) 872 d_drop(dn); 873 realdn = d_materialise_unique(dn, in); 874 if (IS_ERR(realdn)) { 875 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", 876 PTR_ERR(realdn), dn, in, ceph_vinop(in)); 877 if (prehash) 878 *prehash = false; /* don't rehash on error */ 879 dn = realdn; /* note realdn contains the error */ 880 goto out; 881 } else if (realdn) { 882 dout("dn %p (%d) spliced with %p (%d) " 883 "inode %p ino %llx.%llx\n", 884 dn, dn->d_count, 885 realdn, realdn->d_count, 886 realdn->d_inode, ceph_vinop(realdn->d_inode)); 887 dput(dn); 888 dn = realdn; 889 } else { 890 BUG_ON(!ceph_dentry(dn)); 891 dout("dn %p attached to %p ino %llx.%llx\n", 892 dn, dn->d_inode, ceph_vinop(dn->d_inode)); 893 } 894 if ((!prehash || *prehash) && d_unhashed(dn)) 895 d_rehash(dn); 896 if (set_offset) 897 ceph_set_dentry_offset(dn); 898 out: 899 return dn; 900 } 901 902 /* 903 * Incorporate results into the local cache. This is either just 904 * one inode, or a directory, dentry, and possibly linked-to inode (e.g., 905 * after a lookup). 906 * 907 * A reply may contain 908 * a directory inode along with a dentry. 909 * and/or a target inode 910 * 911 * Called with snap_rwsem (read). 912 */ 913 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, 914 struct ceph_mds_session *session) 915 { 916 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 917 struct inode *in = NULL; 918 struct ceph_mds_reply_inode *ininfo; 919 struct ceph_vino vino; 920 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 921 int i = 0; 922 int err = 0; 923 924 dout("fill_trace %p is_dentry %d is_target %d\n", req, 925 rinfo->head->is_dentry, rinfo->head->is_target); 926 927 #if 0 928 /* 929 * Debugging hook: 930 * 931 * If we resend completed ops to a recovering mds, we get no 932 * trace. Since that is very rare, pretend this is the case 933 * to ensure the 'no trace' handlers in the callers behave. 934 * 935 * Fill in inodes unconditionally to avoid breaking cap 936 * invariants. 937 */ 938 if (rinfo->head->op & CEPH_MDS_OP_WRITE) { 939 pr_info("fill_trace faking empty trace on %lld %s\n", 940 req->r_tid, ceph_mds_op_name(rinfo->head->op)); 941 if (rinfo->head->is_dentry) { 942 rinfo->head->is_dentry = 0; 943 err = fill_inode(req->r_locked_dir, 944 &rinfo->diri, rinfo->dirfrag, 945 session, req->r_request_started, -1); 946 } 947 if (rinfo->head->is_target) { 948 rinfo->head->is_target = 0; 949 ininfo = rinfo->targeti.in; 950 vino.ino = le64_to_cpu(ininfo->ino); 951 vino.snap = le64_to_cpu(ininfo->snapid); 952 in = ceph_get_inode(sb, vino); 953 err = fill_inode(in, &rinfo->targeti, NULL, 954 session, req->r_request_started, 955 req->r_fmode); 956 iput(in); 957 } 958 } 959 #endif 960 961 if (!rinfo->head->is_target && !rinfo->head->is_dentry) { 962 dout("fill_trace reply is empty!\n"); 963 if (rinfo->head->result == 0 && req->r_locked_dir) 964 ceph_invalidate_dir_request(req); 965 return 0; 966 } 967 968 if (rinfo->head->is_dentry) { 969 struct inode *dir = req->r_locked_dir; 970 971 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, 972 session, req->r_request_started, -1, 973 &req->r_caps_reservation); 974 if (err < 0) 975 return err; 976 } 977 978 /* 979 * ignore null lease/binding on snapdir ENOENT, or else we 980 * will have trouble splicing in the virtual snapdir later 981 */ 982 if (rinfo->head->is_dentry && !req->r_aborted && 983 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, 984 fsc->mount_options->snapdir_name, 985 req->r_dentry->d_name.len))) { 986 /* 987 * lookup link rename : null -> possibly existing inode 988 * mknod symlink mkdir : null -> new inode 989 * unlink : linked -> null 990 */ 991 struct inode *dir = req->r_locked_dir; 992 struct dentry *dn = req->r_dentry; 993 bool have_dir_cap, have_lease; 994 995 BUG_ON(!dn); 996 BUG_ON(!dir); 997 BUG_ON(dn->d_parent->d_inode != dir); 998 BUG_ON(ceph_ino(dir) != 999 le64_to_cpu(rinfo->diri.in->ino)); 1000 BUG_ON(ceph_snap(dir) != 1001 le64_to_cpu(rinfo->diri.in->snapid)); 1002 1003 /* do we have a lease on the whole dir? */ 1004 have_dir_cap = 1005 (le32_to_cpu(rinfo->diri.in->cap.caps) & 1006 CEPH_CAP_FILE_SHARED); 1007 1008 /* do we have a dn lease? */ 1009 have_lease = have_dir_cap || 1010 (le16_to_cpu(rinfo->dlease->mask) & 1011 CEPH_LOCK_DN); 1012 1013 if (!have_lease) 1014 dout("fill_trace no dentry lease or dir cap\n"); 1015 1016 /* rename? */ 1017 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) { 1018 dout(" src %p '%.*s' dst %p '%.*s'\n", 1019 req->r_old_dentry, 1020 req->r_old_dentry->d_name.len, 1021 req->r_old_dentry->d_name.name, 1022 dn, dn->d_name.len, dn->d_name.name); 1023 dout("fill_trace doing d_move %p -> %p\n", 1024 req->r_old_dentry, dn); 1025 1026 /* d_move screws up d_subdirs order */ 1027 ceph_i_clear(dir, CEPH_I_COMPLETE); 1028 1029 d_move(req->r_old_dentry, dn); 1030 dout(" src %p '%.*s' dst %p '%.*s'\n", 1031 req->r_old_dentry, 1032 req->r_old_dentry->d_name.len, 1033 req->r_old_dentry->d_name.name, 1034 dn, dn->d_name.len, dn->d_name.name); 1035 1036 /* ensure target dentry is invalidated, despite 1037 rehashing bug in vfs_rename_dir */ 1038 ceph_invalidate_dentry_lease(dn); 1039 1040 /* take overwritten dentry's readdir offset */ 1041 dout("dn %p gets %p offset %lld (old offset %lld)\n", 1042 req->r_old_dentry, dn, ceph_dentry(dn)->offset, 1043 ceph_dentry(req->r_old_dentry)->offset); 1044 ceph_dentry(req->r_old_dentry)->offset = 1045 ceph_dentry(dn)->offset; 1046 1047 dn = req->r_old_dentry; /* use old_dentry */ 1048 in = dn->d_inode; 1049 } 1050 1051 /* null dentry? */ 1052 if (!rinfo->head->is_target) { 1053 dout("fill_trace null dentry\n"); 1054 if (dn->d_inode) { 1055 dout("d_delete %p\n", dn); 1056 d_delete(dn); 1057 } else { 1058 dout("d_instantiate %p NULL\n", dn); 1059 d_instantiate(dn, NULL); 1060 if (have_lease && d_unhashed(dn)) 1061 d_rehash(dn); 1062 update_dentry_lease(dn, rinfo->dlease, 1063 session, 1064 req->r_request_started); 1065 } 1066 goto done; 1067 } 1068 1069 /* attach proper inode */ 1070 ininfo = rinfo->targeti.in; 1071 vino.ino = le64_to_cpu(ininfo->ino); 1072 vino.snap = le64_to_cpu(ininfo->snapid); 1073 in = dn->d_inode; 1074 if (!in) { 1075 in = ceph_get_inode(sb, vino); 1076 if (IS_ERR(in)) { 1077 pr_err("fill_trace bad get_inode " 1078 "%llx.%llx\n", vino.ino, vino.snap); 1079 err = PTR_ERR(in); 1080 d_delete(dn); 1081 goto done; 1082 } 1083 dn = splice_dentry(dn, in, &have_lease, true); 1084 if (IS_ERR(dn)) { 1085 err = PTR_ERR(dn); 1086 goto done; 1087 } 1088 req->r_dentry = dn; /* may have spliced */ 1089 igrab(in); 1090 } else if (ceph_ino(in) == vino.ino && 1091 ceph_snap(in) == vino.snap) { 1092 igrab(in); 1093 } else { 1094 dout(" %p links to %p %llx.%llx, not %llx.%llx\n", 1095 dn, in, ceph_ino(in), ceph_snap(in), 1096 vino.ino, vino.snap); 1097 have_lease = false; 1098 in = NULL; 1099 } 1100 1101 if (have_lease) 1102 update_dentry_lease(dn, rinfo->dlease, session, 1103 req->r_request_started); 1104 dout(" final dn %p\n", dn); 1105 i++; 1106 } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || 1107 req->r_op == CEPH_MDS_OP_MKSNAP) { 1108 struct dentry *dn = req->r_dentry; 1109 1110 /* fill out a snapdir LOOKUPSNAP dentry */ 1111 BUG_ON(!dn); 1112 BUG_ON(!req->r_locked_dir); 1113 BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR); 1114 ininfo = rinfo->targeti.in; 1115 vino.ino = le64_to_cpu(ininfo->ino); 1116 vino.snap = le64_to_cpu(ininfo->snapid); 1117 in = ceph_get_inode(sb, vino); 1118 if (IS_ERR(in)) { 1119 pr_err("fill_inode get_inode badness %llx.%llx\n", 1120 vino.ino, vino.snap); 1121 err = PTR_ERR(in); 1122 d_delete(dn); 1123 goto done; 1124 } 1125 dout(" linking snapped dir %p to dn %p\n", in, dn); 1126 dn = splice_dentry(dn, in, NULL, true); 1127 if (IS_ERR(dn)) { 1128 err = PTR_ERR(dn); 1129 goto done; 1130 } 1131 req->r_dentry = dn; /* may have spliced */ 1132 igrab(in); 1133 rinfo->head->is_dentry = 1; /* fool notrace handlers */ 1134 } 1135 1136 if (rinfo->head->is_target) { 1137 vino.ino = le64_to_cpu(rinfo->targeti.in->ino); 1138 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); 1139 1140 if (in == NULL || ceph_ino(in) != vino.ino || 1141 ceph_snap(in) != vino.snap) { 1142 in = ceph_get_inode(sb, vino); 1143 if (IS_ERR(in)) { 1144 err = PTR_ERR(in); 1145 goto done; 1146 } 1147 } 1148 req->r_target_inode = in; 1149 1150 err = fill_inode(in, 1151 &rinfo->targeti, NULL, 1152 session, req->r_request_started, 1153 (le32_to_cpu(rinfo->head->result) == 0) ? 1154 req->r_fmode : -1, 1155 &req->r_caps_reservation); 1156 if (err < 0) { 1157 pr_err("fill_inode badness %p %llx.%llx\n", 1158 in, ceph_vinop(in)); 1159 goto done; 1160 } 1161 } 1162 1163 done: 1164 dout("fill_trace done err=%d\n", err); 1165 return err; 1166 } 1167 1168 /* 1169 * Prepopulate our cache with readdir results, leases, etc. 1170 */ 1171 int ceph_readdir_prepopulate(struct ceph_mds_request *req, 1172 struct ceph_mds_session *session) 1173 { 1174 struct dentry *parent = req->r_dentry; 1175 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1176 struct qstr dname; 1177 struct dentry *dn; 1178 struct inode *in; 1179 int err = 0, i; 1180 struct inode *snapdir = NULL; 1181 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; 1182 u64 frag = le32_to_cpu(rhead->args.readdir.frag); 1183 struct ceph_dentry_info *di; 1184 1185 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { 1186 snapdir = ceph_get_snapdir(parent->d_inode); 1187 parent = d_find_alias(snapdir); 1188 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n", 1189 rinfo->dir_nr, parent); 1190 } else { 1191 dout("readdir_prepopulate %d items under dn %p\n", 1192 rinfo->dir_nr, parent); 1193 if (rinfo->dir_dir) 1194 ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); 1195 } 1196 1197 for (i = 0; i < rinfo->dir_nr; i++) { 1198 struct ceph_vino vino; 1199 1200 dname.name = rinfo->dir_dname[i]; 1201 dname.len = rinfo->dir_dname_len[i]; 1202 dname.hash = full_name_hash(dname.name, dname.len); 1203 1204 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino); 1205 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid); 1206 1207 retry_lookup: 1208 dn = d_lookup(parent, &dname); 1209 dout("d_lookup on parent=%p name=%.*s got %p\n", 1210 parent, dname.len, dname.name, dn); 1211 1212 if (!dn) { 1213 dn = d_alloc(parent, &dname); 1214 dout("d_alloc %p '%.*s' = %p\n", parent, 1215 dname.len, dname.name, dn); 1216 if (dn == NULL) { 1217 dout("d_alloc badness\n"); 1218 err = -ENOMEM; 1219 goto out; 1220 } 1221 err = ceph_init_dentry(dn); 1222 if (err < 0) { 1223 dput(dn); 1224 goto out; 1225 } 1226 } else if (dn->d_inode && 1227 (ceph_ino(dn->d_inode) != vino.ino || 1228 ceph_snap(dn->d_inode) != vino.snap)) { 1229 dout(" dn %p points to wrong inode %p\n", 1230 dn, dn->d_inode); 1231 d_delete(dn); 1232 dput(dn); 1233 goto retry_lookup; 1234 } else { 1235 /* reorder parent's d_subdirs */ 1236 spin_lock(&dcache_lock); 1237 spin_lock(&parent->d_lock); 1238 spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); 1239 list_move(&dn->d_u.d_child, &parent->d_subdirs); 1240 spin_unlock(&dn->d_lock); 1241 spin_unlock(&parent->d_lock); 1242 spin_unlock(&dcache_lock); 1243 } 1244 1245 di = dn->d_fsdata; 1246 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset); 1247 1248 /* inode */ 1249 if (dn->d_inode) { 1250 in = dn->d_inode; 1251 } else { 1252 in = ceph_get_inode(parent->d_sb, vino); 1253 if (IS_ERR(in)) { 1254 dout("new_inode badness\n"); 1255 d_delete(dn); 1256 dput(dn); 1257 err = PTR_ERR(in); 1258 goto out; 1259 } 1260 dn = splice_dentry(dn, in, NULL, false); 1261 if (IS_ERR(dn)) 1262 dn = NULL; 1263 } 1264 1265 if (fill_inode(in, &rinfo->dir_in[i], NULL, session, 1266 req->r_request_started, -1, 1267 &req->r_caps_reservation) < 0) { 1268 pr_err("fill_inode badness on %p\n", in); 1269 goto next_item; 1270 } 1271 if (dn) 1272 update_dentry_lease(dn, rinfo->dir_dlease[i], 1273 req->r_session, 1274 req->r_request_started); 1275 next_item: 1276 if (dn) 1277 dput(dn); 1278 } 1279 req->r_did_prepopulate = true; 1280 1281 out: 1282 if (snapdir) { 1283 iput(snapdir); 1284 dput(parent); 1285 } 1286 dout("readdir_prepopulate done\n"); 1287 return err; 1288 } 1289 1290 int ceph_inode_set_size(struct inode *inode, loff_t size) 1291 { 1292 struct ceph_inode_info *ci = ceph_inode(inode); 1293 int ret = 0; 1294 1295 spin_lock(&inode->i_lock); 1296 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size); 1297 inode->i_size = size; 1298 inode->i_blocks = (size + (1 << 9) - 1) >> 9; 1299 1300 /* tell the MDS if we are approaching max_size */ 1301 if ((size << 1) >= ci->i_max_size && 1302 (ci->i_reported_size << 1) < ci->i_max_size) 1303 ret = 1; 1304 1305 spin_unlock(&inode->i_lock); 1306 return ret; 1307 } 1308 1309 /* 1310 * Write back inode data in a worker thread. (This can't be done 1311 * in the message handler context.) 1312 */ 1313 void ceph_queue_writeback(struct inode *inode) 1314 { 1315 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1316 &ceph_inode(inode)->i_wb_work)) { 1317 dout("ceph_queue_writeback %p\n", inode); 1318 igrab(inode); 1319 } else { 1320 dout("ceph_queue_writeback %p failed\n", inode); 1321 } 1322 } 1323 1324 static void ceph_writeback_work(struct work_struct *work) 1325 { 1326 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1327 i_wb_work); 1328 struct inode *inode = &ci->vfs_inode; 1329 1330 dout("writeback %p\n", inode); 1331 filemap_fdatawrite(&inode->i_data); 1332 iput(inode); 1333 } 1334 1335 /* 1336 * queue an async invalidation 1337 */ 1338 void ceph_queue_invalidate(struct inode *inode) 1339 { 1340 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1341 &ceph_inode(inode)->i_pg_inv_work)) { 1342 dout("ceph_queue_invalidate %p\n", inode); 1343 igrab(inode); 1344 } else { 1345 dout("ceph_queue_invalidate %p failed\n", inode); 1346 } 1347 } 1348 1349 /* 1350 * invalidate any pages that are not dirty or under writeback. this 1351 * includes pages that are clean and mapped. 1352 */ 1353 static void ceph_invalidate_nondirty_pages(struct address_space *mapping) 1354 { 1355 struct pagevec pvec; 1356 pgoff_t next = 0; 1357 int i; 1358 1359 pagevec_init(&pvec, 0); 1360 while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 1361 for (i = 0; i < pagevec_count(&pvec); i++) { 1362 struct page *page = pvec.pages[i]; 1363 pgoff_t index; 1364 int skip_page = 1365 (PageDirty(page) || PageWriteback(page)); 1366 1367 if (!skip_page) 1368 skip_page = !trylock_page(page); 1369 1370 /* 1371 * We really shouldn't be looking at the ->index of an 1372 * unlocked page. But we're not allowed to lock these 1373 * pages. So we rely upon nobody altering the ->index 1374 * of this (pinned-by-us) page. 1375 */ 1376 index = page->index; 1377 if (index > next) 1378 next = index; 1379 next++; 1380 1381 if (skip_page) 1382 continue; 1383 1384 generic_error_remove_page(mapping, page); 1385 unlock_page(page); 1386 } 1387 pagevec_release(&pvec); 1388 cond_resched(); 1389 } 1390 } 1391 1392 /* 1393 * Invalidate inode pages in a worker thread. (This can't be done 1394 * in the message handler context.) 1395 */ 1396 static void ceph_invalidate_work(struct work_struct *work) 1397 { 1398 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1399 i_pg_inv_work); 1400 struct inode *inode = &ci->vfs_inode; 1401 u32 orig_gen; 1402 int check = 0; 1403 1404 spin_lock(&inode->i_lock); 1405 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1406 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1407 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 1408 /* nevermind! */ 1409 spin_unlock(&inode->i_lock); 1410 goto out; 1411 } 1412 orig_gen = ci->i_rdcache_gen; 1413 spin_unlock(&inode->i_lock); 1414 1415 ceph_invalidate_nondirty_pages(inode->i_mapping); 1416 1417 spin_lock(&inode->i_lock); 1418 if (orig_gen == ci->i_rdcache_gen && 1419 orig_gen == ci->i_rdcache_revoking) { 1420 dout("invalidate_pages %p gen %d successful\n", inode, 1421 ci->i_rdcache_gen); 1422 ci->i_rdcache_revoking--; 1423 check = 1; 1424 } else { 1425 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n", 1426 inode, orig_gen, ci->i_rdcache_gen, 1427 ci->i_rdcache_revoking); 1428 } 1429 spin_unlock(&inode->i_lock); 1430 1431 if (check) 1432 ceph_check_caps(ci, 0, NULL); 1433 out: 1434 iput(inode); 1435 } 1436 1437 1438 /* 1439 * called by trunc_wq; take i_mutex ourselves 1440 * 1441 * We also truncate in a separate thread as well. 1442 */ 1443 static void ceph_vmtruncate_work(struct work_struct *work) 1444 { 1445 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1446 i_vmtruncate_work); 1447 struct inode *inode = &ci->vfs_inode; 1448 1449 dout("vmtruncate_work %p\n", inode); 1450 mutex_lock(&inode->i_mutex); 1451 __ceph_do_pending_vmtruncate(inode); 1452 mutex_unlock(&inode->i_mutex); 1453 iput(inode); 1454 } 1455 1456 /* 1457 * Queue an async vmtruncate. If we fail to queue work, we will handle 1458 * the truncation the next time we call __ceph_do_pending_vmtruncate. 1459 */ 1460 void ceph_queue_vmtruncate(struct inode *inode) 1461 { 1462 struct ceph_inode_info *ci = ceph_inode(inode); 1463 1464 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, 1465 &ci->i_vmtruncate_work)) { 1466 dout("ceph_queue_vmtruncate %p\n", inode); 1467 igrab(inode); 1468 } else { 1469 dout("ceph_queue_vmtruncate %p failed, pending=%d\n", 1470 inode, ci->i_truncate_pending); 1471 } 1472 } 1473 1474 /* 1475 * called with i_mutex held. 1476 * 1477 * Make sure any pending truncation is applied before doing anything 1478 * that may depend on it. 1479 */ 1480 void __ceph_do_pending_vmtruncate(struct inode *inode) 1481 { 1482 struct ceph_inode_info *ci = ceph_inode(inode); 1483 u64 to; 1484 int wrbuffer_refs, wake = 0; 1485 1486 retry: 1487 spin_lock(&inode->i_lock); 1488 if (ci->i_truncate_pending == 0) { 1489 dout("__do_pending_vmtruncate %p none pending\n", inode); 1490 spin_unlock(&inode->i_lock); 1491 return; 1492 } 1493 1494 /* 1495 * make sure any dirty snapped pages are flushed before we 1496 * possibly truncate them.. so write AND block! 1497 */ 1498 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { 1499 dout("__do_pending_vmtruncate %p flushing snaps first\n", 1500 inode); 1501 spin_unlock(&inode->i_lock); 1502 filemap_write_and_wait_range(&inode->i_data, 0, 1503 inode->i_sb->s_maxbytes); 1504 goto retry; 1505 } 1506 1507 to = ci->i_truncate_size; 1508 wrbuffer_refs = ci->i_wrbuffer_ref; 1509 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode, 1510 ci->i_truncate_pending, to); 1511 spin_unlock(&inode->i_lock); 1512 1513 truncate_inode_pages(inode->i_mapping, to); 1514 1515 spin_lock(&inode->i_lock); 1516 ci->i_truncate_pending--; 1517 if (ci->i_truncate_pending == 0) 1518 wake = 1; 1519 spin_unlock(&inode->i_lock); 1520 1521 if (wrbuffer_refs == 0) 1522 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 1523 if (wake) 1524 wake_up_all(&ci->i_cap_wq); 1525 } 1526 1527 1528 /* 1529 * symlinks 1530 */ 1531 static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd) 1532 { 1533 struct ceph_inode_info *ci = ceph_inode(dentry->d_inode); 1534 nd_set_link(nd, ci->i_symlink); 1535 return NULL; 1536 } 1537 1538 static const struct inode_operations ceph_symlink_iops = { 1539 .readlink = generic_readlink, 1540 .follow_link = ceph_sym_follow_link, 1541 }; 1542 1543 /* 1544 * setattr 1545 */ 1546 int ceph_setattr(struct dentry *dentry, struct iattr *attr) 1547 { 1548 struct inode *inode = dentry->d_inode; 1549 struct ceph_inode_info *ci = ceph_inode(inode); 1550 struct inode *parent_inode = dentry->d_parent->d_inode; 1551 const unsigned int ia_valid = attr->ia_valid; 1552 struct ceph_mds_request *req; 1553 struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc; 1554 int issued; 1555 int release = 0, dirtied = 0; 1556 int mask = 0; 1557 int err = 0; 1558 1559 if (ceph_snap(inode) != CEPH_NOSNAP) 1560 return -EROFS; 1561 1562 __ceph_do_pending_vmtruncate(inode); 1563 1564 err = inode_change_ok(inode, attr); 1565 if (err != 0) 1566 return err; 1567 1568 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR, 1569 USE_AUTH_MDS); 1570 if (IS_ERR(req)) 1571 return PTR_ERR(req); 1572 1573 spin_lock(&inode->i_lock); 1574 issued = __ceph_caps_issued(ci, NULL); 1575 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); 1576 1577 if (ia_valid & ATTR_UID) { 1578 dout("setattr %p uid %d -> %d\n", inode, 1579 inode->i_uid, attr->ia_uid); 1580 if (issued & CEPH_CAP_AUTH_EXCL) { 1581 inode->i_uid = attr->ia_uid; 1582 dirtied |= CEPH_CAP_AUTH_EXCL; 1583 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1584 attr->ia_uid != inode->i_uid) { 1585 req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid); 1586 mask |= CEPH_SETATTR_UID; 1587 release |= CEPH_CAP_AUTH_SHARED; 1588 } 1589 } 1590 if (ia_valid & ATTR_GID) { 1591 dout("setattr %p gid %d -> %d\n", inode, 1592 inode->i_gid, attr->ia_gid); 1593 if (issued & CEPH_CAP_AUTH_EXCL) { 1594 inode->i_gid = attr->ia_gid; 1595 dirtied |= CEPH_CAP_AUTH_EXCL; 1596 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1597 attr->ia_gid != inode->i_gid) { 1598 req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid); 1599 mask |= CEPH_SETATTR_GID; 1600 release |= CEPH_CAP_AUTH_SHARED; 1601 } 1602 } 1603 if (ia_valid & ATTR_MODE) { 1604 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode, 1605 attr->ia_mode); 1606 if (issued & CEPH_CAP_AUTH_EXCL) { 1607 inode->i_mode = attr->ia_mode; 1608 dirtied |= CEPH_CAP_AUTH_EXCL; 1609 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1610 attr->ia_mode != inode->i_mode) { 1611 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode); 1612 mask |= CEPH_SETATTR_MODE; 1613 release |= CEPH_CAP_AUTH_SHARED; 1614 } 1615 } 1616 1617 if (ia_valid & ATTR_ATIME) { 1618 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode, 1619 inode->i_atime.tv_sec, inode->i_atime.tv_nsec, 1620 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec); 1621 if (issued & CEPH_CAP_FILE_EXCL) { 1622 ci->i_time_warp_seq++; 1623 inode->i_atime = attr->ia_atime; 1624 dirtied |= CEPH_CAP_FILE_EXCL; 1625 } else if ((issued & CEPH_CAP_FILE_WR) && 1626 timespec_compare(&inode->i_atime, 1627 &attr->ia_atime) < 0) { 1628 inode->i_atime = attr->ia_atime; 1629 dirtied |= CEPH_CAP_FILE_WR; 1630 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1631 !timespec_equal(&inode->i_atime, &attr->ia_atime)) { 1632 ceph_encode_timespec(&req->r_args.setattr.atime, 1633 &attr->ia_atime); 1634 mask |= CEPH_SETATTR_ATIME; 1635 release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | 1636 CEPH_CAP_FILE_WR; 1637 } 1638 } 1639 if (ia_valid & ATTR_MTIME) { 1640 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode, 1641 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 1642 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec); 1643 if (issued & CEPH_CAP_FILE_EXCL) { 1644 ci->i_time_warp_seq++; 1645 inode->i_mtime = attr->ia_mtime; 1646 dirtied |= CEPH_CAP_FILE_EXCL; 1647 } else if ((issued & CEPH_CAP_FILE_WR) && 1648 timespec_compare(&inode->i_mtime, 1649 &attr->ia_mtime) < 0) { 1650 inode->i_mtime = attr->ia_mtime; 1651 dirtied |= CEPH_CAP_FILE_WR; 1652 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1653 !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) { 1654 ceph_encode_timespec(&req->r_args.setattr.mtime, 1655 &attr->ia_mtime); 1656 mask |= CEPH_SETATTR_MTIME; 1657 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 1658 CEPH_CAP_FILE_WR; 1659 } 1660 } 1661 if (ia_valid & ATTR_SIZE) { 1662 dout("setattr %p size %lld -> %lld\n", inode, 1663 inode->i_size, attr->ia_size); 1664 if (attr->ia_size > inode->i_sb->s_maxbytes) { 1665 err = -EINVAL; 1666 goto out; 1667 } 1668 if ((issued & CEPH_CAP_FILE_EXCL) && 1669 attr->ia_size > inode->i_size) { 1670 inode->i_size = attr->ia_size; 1671 inode->i_blocks = 1672 (attr->ia_size + (1 << 9) - 1) >> 9; 1673 inode->i_ctime = attr->ia_ctime; 1674 ci->i_reported_size = attr->ia_size; 1675 dirtied |= CEPH_CAP_FILE_EXCL; 1676 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1677 attr->ia_size != inode->i_size) { 1678 req->r_args.setattr.size = cpu_to_le64(attr->ia_size); 1679 req->r_args.setattr.old_size = 1680 cpu_to_le64(inode->i_size); 1681 mask |= CEPH_SETATTR_SIZE; 1682 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 1683 CEPH_CAP_FILE_WR; 1684 } 1685 } 1686 1687 /* these do nothing */ 1688 if (ia_valid & ATTR_CTIME) { 1689 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME| 1690 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0; 1691 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode, 1692 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 1693 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 1694 only ? "ctime only" : "ignored"); 1695 inode->i_ctime = attr->ia_ctime; 1696 if (only) { 1697 /* 1698 * if kernel wants to dirty ctime but nothing else, 1699 * we need to choose a cap to dirty under, or do 1700 * a almost-no-op setattr 1701 */ 1702 if (issued & CEPH_CAP_AUTH_EXCL) 1703 dirtied |= CEPH_CAP_AUTH_EXCL; 1704 else if (issued & CEPH_CAP_FILE_EXCL) 1705 dirtied |= CEPH_CAP_FILE_EXCL; 1706 else if (issued & CEPH_CAP_XATTR_EXCL) 1707 dirtied |= CEPH_CAP_XATTR_EXCL; 1708 else 1709 mask |= CEPH_SETATTR_CTIME; 1710 } 1711 } 1712 if (ia_valid & ATTR_FILE) 1713 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 1714 1715 if (dirtied) { 1716 __ceph_mark_dirty_caps(ci, dirtied); 1717 inode->i_ctime = CURRENT_TIME; 1718 } 1719 1720 release &= issued; 1721 spin_unlock(&inode->i_lock); 1722 1723 if (mask) { 1724 req->r_inode = igrab(inode); 1725 req->r_inode_drop = release; 1726 req->r_args.setattr.mask = cpu_to_le32(mask); 1727 req->r_num_caps = 1; 1728 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 1729 } 1730 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 1731 ceph_cap_string(dirtied), mask); 1732 1733 ceph_mdsc_put_request(req); 1734 __ceph_do_pending_vmtruncate(inode); 1735 return err; 1736 out: 1737 spin_unlock(&inode->i_lock); 1738 ceph_mdsc_put_request(req); 1739 return err; 1740 } 1741 1742 /* 1743 * Verify that we have a lease on the given mask. If not, 1744 * do a getattr against an mds. 1745 */ 1746 int ceph_do_getattr(struct inode *inode, int mask) 1747 { 1748 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 1749 struct ceph_mds_client *mdsc = fsc->mdsc; 1750 struct ceph_mds_request *req; 1751 int err; 1752 1753 if (ceph_snap(inode) == CEPH_SNAPDIR) { 1754 dout("do_getattr inode %p SNAPDIR\n", inode); 1755 return 0; 1756 } 1757 1758 dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode); 1759 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 1760 return 0; 1761 1762 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1763 if (IS_ERR(req)) 1764 return PTR_ERR(req); 1765 req->r_inode = igrab(inode); 1766 req->r_num_caps = 1; 1767 req->r_args.getattr.mask = cpu_to_le32(mask); 1768 err = ceph_mdsc_do_request(mdsc, NULL, req); 1769 ceph_mdsc_put_request(req); 1770 dout("do_getattr result=%d\n", err); 1771 return err; 1772 } 1773 1774 1775 /* 1776 * Check inode permissions. We verify we have a valid value for 1777 * the AUTH cap, then call the generic handler. 1778 */ 1779 int ceph_permission(struct inode *inode, int mask) 1780 { 1781 int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1782 1783 if (!err) 1784 err = generic_permission(inode, mask, NULL); 1785 return err; 1786 } 1787 1788 /* 1789 * Get all attributes. Hopefully somedata we'll have a statlite() 1790 * and can limit the fields we require to be accurate. 1791 */ 1792 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 1793 struct kstat *stat) 1794 { 1795 struct inode *inode = dentry->d_inode; 1796 struct ceph_inode_info *ci = ceph_inode(inode); 1797 int err; 1798 1799 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL); 1800 if (!err) { 1801 generic_fillattr(inode, stat); 1802 stat->ino = inode->i_ino; 1803 if (ceph_snap(inode) != CEPH_NOSNAP) 1804 stat->dev = ceph_snap(inode); 1805 else 1806 stat->dev = 0; 1807 if (S_ISDIR(inode->i_mode)) { 1808 stat->size = ci->i_rbytes; 1809 stat->blocks = 0; 1810 stat->blksize = 65536; 1811 } 1812 } 1813 return err; 1814 } 1815