1 #include "ceph_debug.h" 2 3 #include <linux/module.h> 4 #include <linux/fs.h> 5 #include <linux/smp_lock.h> 6 #include <linux/slab.h> 7 #include <linux/string.h> 8 #include <linux/uaccess.h> 9 #include <linux/kernel.h> 10 #include <linux/namei.h> 11 #include <linux/writeback.h> 12 #include <linux/vmalloc.h> 13 #include <linux/pagevec.h> 14 15 #include "super.h" 16 #include "decode.h" 17 18 /* 19 * Ceph inode operations 20 * 21 * Implement basic inode helpers (get, alloc) and inode ops (getattr, 22 * setattr, etc.), xattr helpers, and helpers for assimilating 23 * metadata returned by the MDS into our cache. 24 * 25 * Also define helpers for doing asynchronous writeback, invalidation, 26 * and truncation for the benefit of those who can't afford to block 27 * (typically because they are in the message handler path). 28 */ 29 30 static const struct inode_operations ceph_symlink_iops; 31 32 static void ceph_invalidate_work(struct work_struct *work); 33 static void ceph_writeback_work(struct work_struct *work); 34 static void ceph_vmtruncate_work(struct work_struct *work); 35 36 /* 37 * find or create an inode, given the ceph ino number 38 */ 39 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) 40 { 41 struct inode *inode; 42 ino_t t = ceph_vino_to_ino(vino); 43 44 inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino); 45 if (inode == NULL) 46 return ERR_PTR(-ENOMEM); 47 if (inode->i_state & I_NEW) { 48 dout("get_inode created new inode %p %llx.%llx ino %llx\n", 49 inode, ceph_vinop(inode), (u64)inode->i_ino); 50 unlock_new_inode(inode); 51 } 52 53 dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino, 54 vino.snap, inode); 55 return inode; 56 } 57 58 /* 59 * get/constuct snapdir inode for a given directory 60 */ 61 struct inode *ceph_get_snapdir(struct inode *parent) 62 { 63 struct ceph_vino vino = { 64 .ino = ceph_ino(parent), 65 .snap = CEPH_SNAPDIR, 66 }; 67 struct inode *inode = ceph_get_inode(parent->i_sb, vino); 68 struct ceph_inode_info *ci = ceph_inode(inode); 69 70 BUG_ON(!S_ISDIR(parent->i_mode)); 71 if (IS_ERR(inode)) 72 return inode; 73 inode->i_mode = parent->i_mode; 74 inode->i_uid = parent->i_uid; 75 inode->i_gid = parent->i_gid; 76 inode->i_op = &ceph_dir_iops; 77 inode->i_fop = &ceph_dir_fops; 78 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ 79 ci->i_rbytes = 0; 80 return inode; 81 } 82 83 const struct inode_operations ceph_file_iops = { 84 .permission = ceph_permission, 85 .setattr = ceph_setattr, 86 .getattr = ceph_getattr, 87 .setxattr = ceph_setxattr, 88 .getxattr = ceph_getxattr, 89 .listxattr = ceph_listxattr, 90 .removexattr = ceph_removexattr, 91 }; 92 93 94 /* 95 * We use a 'frag tree' to keep track of the MDS's directory fragments 96 * for a given inode (usually there is just a single fragment). We 97 * need to know when a child frag is delegated to a new MDS, or when 98 * it is flagged as replicated, so we can direct our requests 99 * accordingly. 100 */ 101 102 /* 103 * find/create a frag in the tree 104 */ 105 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, 106 u32 f) 107 { 108 struct rb_node **p; 109 struct rb_node *parent = NULL; 110 struct ceph_inode_frag *frag; 111 int c; 112 113 p = &ci->i_fragtree.rb_node; 114 while (*p) { 115 parent = *p; 116 frag = rb_entry(parent, struct ceph_inode_frag, node); 117 c = ceph_frag_compare(f, frag->frag); 118 if (c < 0) 119 p = &(*p)->rb_left; 120 else if (c > 0) 121 p = &(*p)->rb_right; 122 else 123 return frag; 124 } 125 126 frag = kmalloc(sizeof(*frag), GFP_NOFS); 127 if (!frag) { 128 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx " 129 "frag %x\n", &ci->vfs_inode, 130 ceph_vinop(&ci->vfs_inode), f); 131 return ERR_PTR(-ENOMEM); 132 } 133 frag->frag = f; 134 frag->split_by = 0; 135 frag->mds = -1; 136 frag->ndist = 0; 137 138 rb_link_node(&frag->node, parent, p); 139 rb_insert_color(&frag->node, &ci->i_fragtree); 140 141 dout("get_or_create_frag added %llx.%llx frag %x\n", 142 ceph_vinop(&ci->vfs_inode), f); 143 return frag; 144 } 145 146 /* 147 * find a specific frag @f 148 */ 149 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f) 150 { 151 struct rb_node *n = ci->i_fragtree.rb_node; 152 153 while (n) { 154 struct ceph_inode_frag *frag = 155 rb_entry(n, struct ceph_inode_frag, node); 156 int c = ceph_frag_compare(f, frag->frag); 157 if (c < 0) 158 n = n->rb_left; 159 else if (c > 0) 160 n = n->rb_right; 161 else 162 return frag; 163 } 164 return NULL; 165 } 166 167 /* 168 * Choose frag containing the given value @v. If @pfrag is 169 * specified, copy the frag delegation info to the caller if 170 * it is present. 171 */ 172 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, 173 struct ceph_inode_frag *pfrag, 174 int *found) 175 { 176 u32 t = ceph_frag_make(0, 0); 177 struct ceph_inode_frag *frag; 178 unsigned nway, i; 179 u32 n; 180 181 if (found) 182 *found = 0; 183 184 mutex_lock(&ci->i_fragtree_mutex); 185 while (1) { 186 WARN_ON(!ceph_frag_contains_value(t, v)); 187 frag = __ceph_find_frag(ci, t); 188 if (!frag) 189 break; /* t is a leaf */ 190 if (frag->split_by == 0) { 191 if (pfrag) 192 memcpy(pfrag, frag, sizeof(*pfrag)); 193 if (found) 194 *found = 1; 195 break; 196 } 197 198 /* choose child */ 199 nway = 1 << frag->split_by; 200 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t, 201 frag->split_by, nway); 202 for (i = 0; i < nway; i++) { 203 n = ceph_frag_make_child(t, frag->split_by, i); 204 if (ceph_frag_contains_value(n, v)) { 205 t = n; 206 break; 207 } 208 } 209 BUG_ON(i == nway); 210 } 211 dout("choose_frag(%x) = %x\n", v, t); 212 213 mutex_unlock(&ci->i_fragtree_mutex); 214 return t; 215 } 216 217 /* 218 * Process dirfrag (delegation) info from the mds. Include leaf 219 * fragment in tree ONLY if ndist > 0. Otherwise, only 220 * branches/splits are included in i_fragtree) 221 */ 222 static int ceph_fill_dirfrag(struct inode *inode, 223 struct ceph_mds_reply_dirfrag *dirinfo) 224 { 225 struct ceph_inode_info *ci = ceph_inode(inode); 226 struct ceph_inode_frag *frag; 227 u32 id = le32_to_cpu(dirinfo->frag); 228 int mds = le32_to_cpu(dirinfo->auth); 229 int ndist = le32_to_cpu(dirinfo->ndist); 230 int i; 231 int err = 0; 232 233 mutex_lock(&ci->i_fragtree_mutex); 234 if (ndist == 0) { 235 /* no delegation info needed. */ 236 frag = __ceph_find_frag(ci, id); 237 if (!frag) 238 goto out; 239 if (frag->split_by == 0) { 240 /* tree leaf, remove */ 241 dout("fill_dirfrag removed %llx.%llx frag %x" 242 " (no ref)\n", ceph_vinop(inode), id); 243 rb_erase(&frag->node, &ci->i_fragtree); 244 kfree(frag); 245 } else { 246 /* tree branch, keep and clear */ 247 dout("fill_dirfrag cleared %llx.%llx frag %x" 248 " referral\n", ceph_vinop(inode), id); 249 frag->mds = -1; 250 frag->ndist = 0; 251 } 252 goto out; 253 } 254 255 256 /* find/add this frag to store mds delegation info */ 257 frag = __get_or_create_frag(ci, id); 258 if (IS_ERR(frag)) { 259 /* this is not the end of the world; we can continue 260 with bad/inaccurate delegation info */ 261 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n", 262 ceph_vinop(inode), le32_to_cpu(dirinfo->frag)); 263 err = -ENOMEM; 264 goto out; 265 } 266 267 frag->mds = mds; 268 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP); 269 for (i = 0; i < frag->ndist; i++) 270 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]); 271 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n", 272 ceph_vinop(inode), frag->frag, frag->ndist); 273 274 out: 275 mutex_unlock(&ci->i_fragtree_mutex); 276 return err; 277 } 278 279 280 /* 281 * initialize a newly allocated inode. 282 */ 283 struct inode *ceph_alloc_inode(struct super_block *sb) 284 { 285 struct ceph_inode_info *ci; 286 int i; 287 288 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS); 289 if (!ci) 290 return NULL; 291 292 dout("alloc_inode %p\n", &ci->vfs_inode); 293 294 ci->i_version = 0; 295 ci->i_time_warp_seq = 0; 296 ci->i_ceph_flags = 0; 297 ci->i_release_count = 0; 298 ci->i_symlink = NULL; 299 300 ci->i_fragtree = RB_ROOT; 301 mutex_init(&ci->i_fragtree_mutex); 302 303 ci->i_xattrs.blob = NULL; 304 ci->i_xattrs.prealloc_blob = NULL; 305 ci->i_xattrs.dirty = false; 306 ci->i_xattrs.index = RB_ROOT; 307 ci->i_xattrs.count = 0; 308 ci->i_xattrs.names_size = 0; 309 ci->i_xattrs.vals_size = 0; 310 ci->i_xattrs.version = 0; 311 ci->i_xattrs.index_version = 0; 312 313 ci->i_caps = RB_ROOT; 314 ci->i_auth_cap = NULL; 315 ci->i_dirty_caps = 0; 316 ci->i_flushing_caps = 0; 317 INIT_LIST_HEAD(&ci->i_dirty_item); 318 INIT_LIST_HEAD(&ci->i_flushing_item); 319 ci->i_cap_flush_seq = 0; 320 ci->i_cap_flush_last_tid = 0; 321 memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid)); 322 init_waitqueue_head(&ci->i_cap_wq); 323 ci->i_hold_caps_min = 0; 324 ci->i_hold_caps_max = 0; 325 INIT_LIST_HEAD(&ci->i_cap_delay_list); 326 ci->i_cap_exporting_mds = 0; 327 ci->i_cap_exporting_mseq = 0; 328 ci->i_cap_exporting_issued = 0; 329 INIT_LIST_HEAD(&ci->i_cap_snaps); 330 ci->i_head_snapc = NULL; 331 ci->i_snap_caps = 0; 332 333 for (i = 0; i < CEPH_FILE_MODE_NUM; i++) 334 ci->i_nr_by_mode[i] = 0; 335 336 ci->i_truncate_seq = 0; 337 ci->i_truncate_size = 0; 338 ci->i_truncate_pending = 0; 339 340 ci->i_max_size = 0; 341 ci->i_reported_size = 0; 342 ci->i_wanted_max_size = 0; 343 ci->i_requested_max_size = 0; 344 345 ci->i_pin_ref = 0; 346 ci->i_rd_ref = 0; 347 ci->i_rdcache_ref = 0; 348 ci->i_wr_ref = 0; 349 ci->i_wrbuffer_ref = 0; 350 ci->i_wrbuffer_ref_head = 0; 351 ci->i_shared_gen = 0; 352 ci->i_rdcache_gen = 0; 353 ci->i_rdcache_revoking = 0; 354 355 INIT_LIST_HEAD(&ci->i_unsafe_writes); 356 INIT_LIST_HEAD(&ci->i_unsafe_dirops); 357 spin_lock_init(&ci->i_unsafe_lock); 358 359 ci->i_snap_realm = NULL; 360 INIT_LIST_HEAD(&ci->i_snap_realm_item); 361 INIT_LIST_HEAD(&ci->i_snap_flush_item); 362 363 INIT_WORK(&ci->i_wb_work, ceph_writeback_work); 364 INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work); 365 366 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); 367 368 return &ci->vfs_inode; 369 } 370 371 void ceph_destroy_inode(struct inode *inode) 372 { 373 struct ceph_inode_info *ci = ceph_inode(inode); 374 struct ceph_inode_frag *frag; 375 struct rb_node *n; 376 377 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode)); 378 379 ceph_queue_caps_release(inode); 380 381 /* 382 * we may still have a snap_realm reference if there are stray 383 * caps in i_cap_exporting_issued or i_snap_caps. 384 */ 385 if (ci->i_snap_realm) { 386 struct ceph_mds_client *mdsc = 387 &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; 388 struct ceph_snap_realm *realm = ci->i_snap_realm; 389 390 dout(" dropping residual ref to snap realm %p\n", realm); 391 spin_lock(&realm->inodes_with_caps_lock); 392 list_del_init(&ci->i_snap_realm_item); 393 spin_unlock(&realm->inodes_with_caps_lock); 394 ceph_put_snap_realm(mdsc, realm); 395 } 396 397 kfree(ci->i_symlink); 398 while ((n = rb_first(&ci->i_fragtree)) != NULL) { 399 frag = rb_entry(n, struct ceph_inode_frag, node); 400 rb_erase(n, &ci->i_fragtree); 401 kfree(frag); 402 } 403 404 __ceph_destroy_xattrs(ci); 405 if (ci->i_xattrs.blob) 406 ceph_buffer_put(ci->i_xattrs.blob); 407 if (ci->i_xattrs.prealloc_blob) 408 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 409 410 kmem_cache_free(ceph_inode_cachep, ci); 411 } 412 413 414 /* 415 * Helpers to fill in size, ctime, mtime, and atime. We have to be 416 * careful because either the client or MDS may have more up to date 417 * info, depending on which capabilities are held, and whether 418 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime 419 * and size are monotonically increasing, except when utimes() or 420 * truncate() increments the corresponding _seq values.) 421 */ 422 int ceph_fill_file_size(struct inode *inode, int issued, 423 u32 truncate_seq, u64 truncate_size, u64 size) 424 { 425 struct ceph_inode_info *ci = ceph_inode(inode); 426 int queue_trunc = 0; 427 428 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || 429 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { 430 dout("size %lld -> %llu\n", inode->i_size, size); 431 inode->i_size = size; 432 inode->i_blocks = (size + (1<<9) - 1) >> 9; 433 ci->i_reported_size = size; 434 if (truncate_seq != ci->i_truncate_seq) { 435 dout("truncate_seq %u -> %u\n", 436 ci->i_truncate_seq, truncate_seq); 437 ci->i_truncate_seq = truncate_seq; 438 /* 439 * If we hold relevant caps, or in the case where we're 440 * not the only client referencing this file and we 441 * don't hold those caps, then we need to check whether 442 * the file is either opened or mmaped 443 */ 444 if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD| 445 CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER| 446 CEPH_CAP_FILE_EXCL| 447 CEPH_CAP_FILE_LAZYIO)) || 448 mapping_mapped(inode->i_mapping) || 449 __ceph_caps_file_wanted(ci)) { 450 ci->i_truncate_pending++; 451 queue_trunc = 1; 452 } 453 } 454 } 455 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 && 456 ci->i_truncate_size != truncate_size) { 457 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size, 458 truncate_size); 459 ci->i_truncate_size = truncate_size; 460 } 461 return queue_trunc; 462 } 463 464 void ceph_fill_file_time(struct inode *inode, int issued, 465 u64 time_warp_seq, struct timespec *ctime, 466 struct timespec *mtime, struct timespec *atime) 467 { 468 struct ceph_inode_info *ci = ceph_inode(inode); 469 int warn = 0; 470 471 if (issued & (CEPH_CAP_FILE_EXCL| 472 CEPH_CAP_FILE_WR| 473 CEPH_CAP_FILE_BUFFER)) { 474 if (timespec_compare(ctime, &inode->i_ctime) > 0) { 475 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", 476 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 477 ctime->tv_sec, ctime->tv_nsec); 478 inode->i_ctime = *ctime; 479 } 480 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 481 /* the MDS did a utimes() */ 482 dout("mtime %ld.%09ld -> %ld.%09ld " 483 "tw %d -> %d\n", 484 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 485 mtime->tv_sec, mtime->tv_nsec, 486 ci->i_time_warp_seq, (int)time_warp_seq); 487 488 inode->i_mtime = *mtime; 489 inode->i_atime = *atime; 490 ci->i_time_warp_seq = time_warp_seq; 491 } else if (time_warp_seq == ci->i_time_warp_seq) { 492 /* nobody did utimes(); take the max */ 493 if (timespec_compare(mtime, &inode->i_mtime) > 0) { 494 dout("mtime %ld.%09ld -> %ld.%09ld inc\n", 495 inode->i_mtime.tv_sec, 496 inode->i_mtime.tv_nsec, 497 mtime->tv_sec, mtime->tv_nsec); 498 inode->i_mtime = *mtime; 499 } 500 if (timespec_compare(atime, &inode->i_atime) > 0) { 501 dout("atime %ld.%09ld -> %ld.%09ld inc\n", 502 inode->i_atime.tv_sec, 503 inode->i_atime.tv_nsec, 504 atime->tv_sec, atime->tv_nsec); 505 inode->i_atime = *atime; 506 } 507 } else if (issued & CEPH_CAP_FILE_EXCL) { 508 /* we did a utimes(); ignore mds values */ 509 } else { 510 warn = 1; 511 } 512 } else { 513 /* we have no write caps; whatever the MDS says is true */ 514 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { 515 inode->i_ctime = *ctime; 516 inode->i_mtime = *mtime; 517 inode->i_atime = *atime; 518 ci->i_time_warp_seq = time_warp_seq; 519 } else { 520 warn = 1; 521 } 522 } 523 if (warn) /* time_warp_seq shouldn't go backwards */ 524 dout("%p mds time_warp_seq %llu < %u\n", 525 inode, time_warp_seq, ci->i_time_warp_seq); 526 } 527 528 /* 529 * Populate an inode based on info from mds. May be called on new or 530 * existing inodes. 531 */ 532 static int fill_inode(struct inode *inode, 533 struct ceph_mds_reply_info_in *iinfo, 534 struct ceph_mds_reply_dirfrag *dirinfo, 535 struct ceph_mds_session *session, 536 unsigned long ttl_from, int cap_fmode, 537 struct ceph_cap_reservation *caps_reservation) 538 { 539 struct ceph_mds_reply_inode *info = iinfo->in; 540 struct ceph_inode_info *ci = ceph_inode(inode); 541 int i; 542 int issued, implemented; 543 struct timespec mtime, atime, ctime; 544 u32 nsplits; 545 struct ceph_buffer *xattr_blob = NULL; 546 int err = 0; 547 int queue_trunc = 0; 548 549 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n", 550 inode, ceph_vinop(inode), le64_to_cpu(info->version), 551 ci->i_version); 552 553 /* 554 * prealloc xattr data, if it looks like we'll need it. only 555 * if len > 4 (meaning there are actually xattrs; the first 4 556 * bytes are the xattr count). 557 */ 558 if (iinfo->xattr_len > 4) { 559 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS); 560 if (!xattr_blob) 561 pr_err("fill_inode ENOMEM xattr blob %d bytes\n", 562 iinfo->xattr_len); 563 } 564 565 spin_lock(&inode->i_lock); 566 567 /* 568 * provided version will be odd if inode value is projected, 569 * even if stable. skip the update if we have a newer info 570 * (e.g., due to inode info racing form multiple MDSs), or if 571 * we are getting projected (unstable) inode info. 572 */ 573 if (le64_to_cpu(info->version) > 0 && 574 (ci->i_version & ~1) > le64_to_cpu(info->version)) 575 goto no_change; 576 577 issued = __ceph_caps_issued(ci, &implemented); 578 issued |= implemented | __ceph_caps_dirty(ci); 579 580 /* update inode */ 581 ci->i_version = le64_to_cpu(info->version); 582 inode->i_version++; 583 inode->i_rdev = le32_to_cpu(info->rdev); 584 585 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) { 586 inode->i_mode = le32_to_cpu(info->mode); 587 inode->i_uid = le32_to_cpu(info->uid); 588 inode->i_gid = le32_to_cpu(info->gid); 589 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 590 inode->i_uid, inode->i_gid); 591 } 592 593 if ((issued & CEPH_CAP_LINK_EXCL) == 0) 594 inode->i_nlink = le32_to_cpu(info->nlink); 595 596 /* be careful with mtime, atime, size */ 597 ceph_decode_timespec(&atime, &info->atime); 598 ceph_decode_timespec(&mtime, &info->mtime); 599 ceph_decode_timespec(&ctime, &info->ctime); 600 queue_trunc = ceph_fill_file_size(inode, issued, 601 le32_to_cpu(info->truncate_seq), 602 le64_to_cpu(info->truncate_size), 603 le64_to_cpu(info->size)); 604 ceph_fill_file_time(inode, issued, 605 le32_to_cpu(info->time_warp_seq), 606 &ctime, &mtime, &atime); 607 608 ci->i_max_size = le64_to_cpu(info->max_size); 609 ci->i_layout = info->layout; 610 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 611 612 /* xattrs */ 613 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */ 614 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && 615 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { 616 if (ci->i_xattrs.blob) 617 ceph_buffer_put(ci->i_xattrs.blob); 618 ci->i_xattrs.blob = xattr_blob; 619 if (xattr_blob) 620 memcpy(ci->i_xattrs.blob->vec.iov_base, 621 iinfo->xattr_data, iinfo->xattr_len); 622 ci->i_xattrs.version = le64_to_cpu(info->xattr_version); 623 xattr_blob = NULL; 624 } 625 626 inode->i_mapping->a_ops = &ceph_aops; 627 inode->i_mapping->backing_dev_info = 628 &ceph_sb_to_client(inode->i_sb)->backing_dev_info; 629 630 switch (inode->i_mode & S_IFMT) { 631 case S_IFIFO: 632 case S_IFBLK: 633 case S_IFCHR: 634 case S_IFSOCK: 635 init_special_inode(inode, inode->i_mode, inode->i_rdev); 636 inode->i_op = &ceph_file_iops; 637 break; 638 case S_IFREG: 639 inode->i_op = &ceph_file_iops; 640 inode->i_fop = &ceph_file_fops; 641 break; 642 case S_IFLNK: 643 inode->i_op = &ceph_symlink_iops; 644 if (!ci->i_symlink) { 645 int symlen = iinfo->symlink_len; 646 char *sym; 647 648 BUG_ON(symlen != inode->i_size); 649 spin_unlock(&inode->i_lock); 650 651 err = -ENOMEM; 652 sym = kmalloc(symlen+1, GFP_NOFS); 653 if (!sym) 654 goto out; 655 memcpy(sym, iinfo->symlink, symlen); 656 sym[symlen] = 0; 657 658 spin_lock(&inode->i_lock); 659 if (!ci->i_symlink) 660 ci->i_symlink = sym; 661 else 662 kfree(sym); /* lost a race */ 663 } 664 break; 665 case S_IFDIR: 666 inode->i_op = &ceph_dir_iops; 667 inode->i_fop = &ceph_dir_fops; 668 669 ci->i_files = le64_to_cpu(info->files); 670 ci->i_subdirs = le64_to_cpu(info->subdirs); 671 ci->i_rbytes = le64_to_cpu(info->rbytes); 672 ci->i_rfiles = le64_to_cpu(info->rfiles); 673 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs); 674 ceph_decode_timespec(&ci->i_rctime, &info->rctime); 675 676 /* set dir completion flag? */ 677 if (ci->i_files == 0 && ci->i_subdirs == 0 && 678 ceph_snap(inode) == CEPH_NOSNAP && 679 (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) && 680 (issued & CEPH_CAP_FILE_EXCL) == 0 && 681 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { 682 dout(" marking %p complete (empty)\n", inode); 683 ci->i_ceph_flags |= CEPH_I_COMPLETE; 684 ci->i_max_offset = 2; 685 } 686 687 /* it may be better to set st_size in getattr instead? */ 688 if (ceph_test_opt(ceph_sb_to_client(inode->i_sb), RBYTES)) 689 inode->i_size = ci->i_rbytes; 690 break; 691 default: 692 pr_err("fill_inode %llx.%llx BAD mode 0%o\n", 693 ceph_vinop(inode), inode->i_mode); 694 } 695 696 no_change: 697 spin_unlock(&inode->i_lock); 698 699 /* queue truncate if we saw i_size decrease */ 700 if (queue_trunc) 701 ceph_queue_vmtruncate(inode); 702 703 /* populate frag tree */ 704 /* FIXME: move me up, if/when version reflects fragtree changes */ 705 nsplits = le32_to_cpu(info->fragtree.nsplits); 706 mutex_lock(&ci->i_fragtree_mutex); 707 for (i = 0; i < nsplits; i++) { 708 u32 id = le32_to_cpu(info->fragtree.splits[i].frag); 709 struct ceph_inode_frag *frag = __get_or_create_frag(ci, id); 710 711 if (IS_ERR(frag)) 712 continue; 713 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by); 714 dout(" frag %x split by %d\n", frag->frag, frag->split_by); 715 } 716 mutex_unlock(&ci->i_fragtree_mutex); 717 718 /* were we issued a capability? */ 719 if (info->cap.caps) { 720 if (ceph_snap(inode) == CEPH_NOSNAP) { 721 ceph_add_cap(inode, session, 722 le64_to_cpu(info->cap.cap_id), 723 cap_fmode, 724 le32_to_cpu(info->cap.caps), 725 le32_to_cpu(info->cap.wanted), 726 le32_to_cpu(info->cap.seq), 727 le32_to_cpu(info->cap.mseq), 728 le64_to_cpu(info->cap.realm), 729 info->cap.flags, 730 caps_reservation); 731 } else { 732 spin_lock(&inode->i_lock); 733 dout(" %p got snap_caps %s\n", inode, 734 ceph_cap_string(le32_to_cpu(info->cap.caps))); 735 ci->i_snap_caps |= le32_to_cpu(info->cap.caps); 736 if (cap_fmode >= 0) 737 __ceph_get_fmode(ci, cap_fmode); 738 spin_unlock(&inode->i_lock); 739 } 740 } else if (cap_fmode >= 0) { 741 pr_warning("mds issued no caps on %llx.%llx\n", 742 ceph_vinop(inode)); 743 __ceph_get_fmode(ci, cap_fmode); 744 } 745 746 /* update delegation info? */ 747 if (dirinfo) 748 ceph_fill_dirfrag(inode, dirinfo); 749 750 err = 0; 751 752 out: 753 if (xattr_blob) 754 ceph_buffer_put(xattr_blob); 755 return err; 756 } 757 758 /* 759 * caller should hold session s_mutex. 760 */ 761 static void update_dentry_lease(struct dentry *dentry, 762 struct ceph_mds_reply_lease *lease, 763 struct ceph_mds_session *session, 764 unsigned long from_time) 765 { 766 struct ceph_dentry_info *di = ceph_dentry(dentry); 767 long unsigned duration = le32_to_cpu(lease->duration_ms); 768 long unsigned ttl = from_time + (duration * HZ) / 1000; 769 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000; 770 struct inode *dir; 771 772 /* only track leases on regular dentries */ 773 if (dentry->d_op != &ceph_dentry_ops) 774 return; 775 776 spin_lock(&dentry->d_lock); 777 dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n", 778 dentry, le16_to_cpu(lease->mask), duration, ttl); 779 780 /* make lease_rdcache_gen match directory */ 781 dir = dentry->d_parent->d_inode; 782 di->lease_shared_gen = ceph_inode(dir)->i_shared_gen; 783 784 if (lease->mask == 0) 785 goto out_unlock; 786 787 if (di->lease_gen == session->s_cap_gen && 788 time_before(ttl, dentry->d_time)) 789 goto out_unlock; /* we already have a newer lease. */ 790 791 if (di->lease_session && di->lease_session != session) 792 goto out_unlock; 793 794 ceph_dentry_lru_touch(dentry); 795 796 if (!di->lease_session) 797 di->lease_session = ceph_get_mds_session(session); 798 di->lease_gen = session->s_cap_gen; 799 di->lease_seq = le32_to_cpu(lease->seq); 800 di->lease_renew_after = half_ttl; 801 di->lease_renew_from = 0; 802 dentry->d_time = ttl; 803 out_unlock: 804 spin_unlock(&dentry->d_lock); 805 return; 806 } 807 808 /* 809 * Set dentry's directory position based on the current dir's max, and 810 * order it in d_subdirs, so that dcache_readdir behaves. 811 */ 812 static void ceph_set_dentry_offset(struct dentry *dn) 813 { 814 struct dentry *dir = dn->d_parent; 815 struct inode *inode = dn->d_parent->d_inode; 816 struct ceph_dentry_info *di; 817 818 BUG_ON(!inode); 819 820 di = ceph_dentry(dn); 821 822 spin_lock(&inode->i_lock); 823 if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) { 824 spin_unlock(&inode->i_lock); 825 return; 826 } 827 di->offset = ceph_inode(inode)->i_max_offset++; 828 spin_unlock(&inode->i_lock); 829 830 spin_lock(&dcache_lock); 831 spin_lock(&dn->d_lock); 832 list_move(&dn->d_u.d_child, &dir->d_subdirs); 833 dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset, 834 dn->d_u.d_child.prev, dn->d_u.d_child.next); 835 spin_unlock(&dn->d_lock); 836 spin_unlock(&dcache_lock); 837 } 838 839 /* 840 * splice a dentry to an inode. 841 * caller must hold directory i_mutex for this to be safe. 842 * 843 * we will only rehash the resulting dentry if @prehash is 844 * true; @prehash will be set to false (for the benefit of 845 * the caller) if we fail. 846 */ 847 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, 848 bool *prehash) 849 { 850 struct dentry *realdn; 851 852 BUG_ON(dn->d_inode); 853 854 /* dn must be unhashed */ 855 if (!d_unhashed(dn)) 856 d_drop(dn); 857 realdn = d_materialise_unique(dn, in); 858 if (IS_ERR(realdn)) { 859 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", 860 PTR_ERR(realdn), dn, in, ceph_vinop(in)); 861 if (prehash) 862 *prehash = false; /* don't rehash on error */ 863 dn = realdn; /* note realdn contains the error */ 864 goto out; 865 } else if (realdn) { 866 dout("dn %p (%d) spliced with %p (%d) " 867 "inode %p ino %llx.%llx\n", 868 dn, atomic_read(&dn->d_count), 869 realdn, atomic_read(&realdn->d_count), 870 realdn->d_inode, ceph_vinop(realdn->d_inode)); 871 dput(dn); 872 dn = realdn; 873 } else { 874 BUG_ON(!ceph_dentry(dn)); 875 dout("dn %p attached to %p ino %llx.%llx\n", 876 dn, dn->d_inode, ceph_vinop(dn->d_inode)); 877 } 878 if ((!prehash || *prehash) && d_unhashed(dn)) 879 d_rehash(dn); 880 ceph_set_dentry_offset(dn); 881 out: 882 return dn; 883 } 884 885 /* 886 * Incorporate results into the local cache. This is either just 887 * one inode, or a directory, dentry, and possibly linked-to inode (e.g., 888 * after a lookup). 889 * 890 * A reply may contain 891 * a directory inode along with a dentry. 892 * and/or a target inode 893 * 894 * Called with snap_rwsem (read). 895 */ 896 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, 897 struct ceph_mds_session *session) 898 { 899 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 900 struct inode *in = NULL; 901 struct ceph_mds_reply_inode *ininfo; 902 struct ceph_vino vino; 903 struct ceph_client *client = ceph_sb_to_client(sb); 904 int i = 0; 905 int err = 0; 906 907 dout("fill_trace %p is_dentry %d is_target %d\n", req, 908 rinfo->head->is_dentry, rinfo->head->is_target); 909 910 #if 0 911 /* 912 * Debugging hook: 913 * 914 * If we resend completed ops to a recovering mds, we get no 915 * trace. Since that is very rare, pretend this is the case 916 * to ensure the 'no trace' handlers in the callers behave. 917 * 918 * Fill in inodes unconditionally to avoid breaking cap 919 * invariants. 920 */ 921 if (rinfo->head->op & CEPH_MDS_OP_WRITE) { 922 pr_info("fill_trace faking empty trace on %lld %s\n", 923 req->r_tid, ceph_mds_op_name(rinfo->head->op)); 924 if (rinfo->head->is_dentry) { 925 rinfo->head->is_dentry = 0; 926 err = fill_inode(req->r_locked_dir, 927 &rinfo->diri, rinfo->dirfrag, 928 session, req->r_request_started, -1); 929 } 930 if (rinfo->head->is_target) { 931 rinfo->head->is_target = 0; 932 ininfo = rinfo->targeti.in; 933 vino.ino = le64_to_cpu(ininfo->ino); 934 vino.snap = le64_to_cpu(ininfo->snapid); 935 in = ceph_get_inode(sb, vino); 936 err = fill_inode(in, &rinfo->targeti, NULL, 937 session, req->r_request_started, 938 req->r_fmode); 939 iput(in); 940 } 941 } 942 #endif 943 944 if (!rinfo->head->is_target && !rinfo->head->is_dentry) { 945 dout("fill_trace reply is empty!\n"); 946 if (rinfo->head->result == 0 && req->r_locked_dir) 947 ceph_invalidate_dir_request(req); 948 return 0; 949 } 950 951 if (rinfo->head->is_dentry) { 952 struct inode *dir = req->r_locked_dir; 953 954 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, 955 session, req->r_request_started, -1, 956 &req->r_caps_reservation); 957 if (err < 0) 958 return err; 959 } 960 961 /* 962 * ignore null lease/binding on snapdir ENOENT, or else we 963 * will have trouble splicing in the virtual snapdir later 964 */ 965 if (rinfo->head->is_dentry && !req->r_aborted && 966 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, 967 client->mount_args->snapdir_name, 968 req->r_dentry->d_name.len))) { 969 /* 970 * lookup link rename : null -> possibly existing inode 971 * mknod symlink mkdir : null -> new inode 972 * unlink : linked -> null 973 */ 974 struct inode *dir = req->r_locked_dir; 975 struct dentry *dn = req->r_dentry; 976 bool have_dir_cap, have_lease; 977 978 BUG_ON(!dn); 979 BUG_ON(!dir); 980 BUG_ON(dn->d_parent->d_inode != dir); 981 BUG_ON(ceph_ino(dir) != 982 le64_to_cpu(rinfo->diri.in->ino)); 983 BUG_ON(ceph_snap(dir) != 984 le64_to_cpu(rinfo->diri.in->snapid)); 985 986 /* do we have a lease on the whole dir? */ 987 have_dir_cap = 988 (le32_to_cpu(rinfo->diri.in->cap.caps) & 989 CEPH_CAP_FILE_SHARED); 990 991 /* do we have a dn lease? */ 992 have_lease = have_dir_cap || 993 (le16_to_cpu(rinfo->dlease->mask) & 994 CEPH_LOCK_DN); 995 996 if (!have_lease) 997 dout("fill_trace no dentry lease or dir cap\n"); 998 999 /* rename? */ 1000 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) { 1001 dout(" src %p '%.*s' dst %p '%.*s'\n", 1002 req->r_old_dentry, 1003 req->r_old_dentry->d_name.len, 1004 req->r_old_dentry->d_name.name, 1005 dn, dn->d_name.len, dn->d_name.name); 1006 dout("fill_trace doing d_move %p -> %p\n", 1007 req->r_old_dentry, dn); 1008 1009 /* d_move screws up d_subdirs order */ 1010 ceph_i_clear(dir, CEPH_I_COMPLETE); 1011 1012 d_move(req->r_old_dentry, dn); 1013 dout(" src %p '%.*s' dst %p '%.*s'\n", 1014 req->r_old_dentry, 1015 req->r_old_dentry->d_name.len, 1016 req->r_old_dentry->d_name.name, 1017 dn, dn->d_name.len, dn->d_name.name); 1018 1019 /* ensure target dentry is invalidated, despite 1020 rehashing bug in vfs_rename_dir */ 1021 ceph_invalidate_dentry_lease(dn); 1022 1023 /* take overwritten dentry's readdir offset */ 1024 dout("dn %p gets %p offset %lld (old offset %lld)\n", 1025 req->r_old_dentry, dn, ceph_dentry(dn)->offset, 1026 ceph_dentry(req->r_old_dentry)->offset); 1027 ceph_dentry(req->r_old_dentry)->offset = 1028 ceph_dentry(dn)->offset; 1029 1030 dn = req->r_old_dentry; /* use old_dentry */ 1031 in = dn->d_inode; 1032 } 1033 1034 /* null dentry? */ 1035 if (!rinfo->head->is_target) { 1036 dout("fill_trace null dentry\n"); 1037 if (dn->d_inode) { 1038 dout("d_delete %p\n", dn); 1039 d_delete(dn); 1040 } else { 1041 dout("d_instantiate %p NULL\n", dn); 1042 d_instantiate(dn, NULL); 1043 if (have_lease && d_unhashed(dn)) 1044 d_rehash(dn); 1045 update_dentry_lease(dn, rinfo->dlease, 1046 session, 1047 req->r_request_started); 1048 } 1049 goto done; 1050 } 1051 1052 /* attach proper inode */ 1053 ininfo = rinfo->targeti.in; 1054 vino.ino = le64_to_cpu(ininfo->ino); 1055 vino.snap = le64_to_cpu(ininfo->snapid); 1056 if (!dn->d_inode) { 1057 in = ceph_get_inode(sb, vino); 1058 if (IS_ERR(in)) { 1059 pr_err("fill_trace bad get_inode " 1060 "%llx.%llx\n", vino.ino, vino.snap); 1061 err = PTR_ERR(in); 1062 d_delete(dn); 1063 goto done; 1064 } 1065 dn = splice_dentry(dn, in, &have_lease); 1066 if (IS_ERR(dn)) { 1067 err = PTR_ERR(dn); 1068 goto done; 1069 } 1070 req->r_dentry = dn; /* may have spliced */ 1071 igrab(in); 1072 } else if (ceph_ino(in) == vino.ino && 1073 ceph_snap(in) == vino.snap) { 1074 igrab(in); 1075 } else { 1076 dout(" %p links to %p %llx.%llx, not %llx.%llx\n", 1077 dn, in, ceph_ino(in), ceph_snap(in), 1078 vino.ino, vino.snap); 1079 have_lease = false; 1080 in = NULL; 1081 } 1082 1083 if (have_lease) 1084 update_dentry_lease(dn, rinfo->dlease, session, 1085 req->r_request_started); 1086 dout(" final dn %p\n", dn); 1087 i++; 1088 } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || 1089 req->r_op == CEPH_MDS_OP_MKSNAP) { 1090 struct dentry *dn = req->r_dentry; 1091 1092 /* fill out a snapdir LOOKUPSNAP dentry */ 1093 BUG_ON(!dn); 1094 BUG_ON(!req->r_locked_dir); 1095 BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR); 1096 ininfo = rinfo->targeti.in; 1097 vino.ino = le64_to_cpu(ininfo->ino); 1098 vino.snap = le64_to_cpu(ininfo->snapid); 1099 in = ceph_get_inode(sb, vino); 1100 if (IS_ERR(in)) { 1101 pr_err("fill_inode get_inode badness %llx.%llx\n", 1102 vino.ino, vino.snap); 1103 err = PTR_ERR(in); 1104 d_delete(dn); 1105 goto done; 1106 } 1107 dout(" linking snapped dir %p to dn %p\n", in, dn); 1108 dn = splice_dentry(dn, in, NULL); 1109 if (IS_ERR(dn)) { 1110 err = PTR_ERR(dn); 1111 goto done; 1112 } 1113 req->r_dentry = dn; /* may have spliced */ 1114 igrab(in); 1115 rinfo->head->is_dentry = 1; /* fool notrace handlers */ 1116 } 1117 1118 if (rinfo->head->is_target) { 1119 vino.ino = le64_to_cpu(rinfo->targeti.in->ino); 1120 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); 1121 1122 if (in == NULL || ceph_ino(in) != vino.ino || 1123 ceph_snap(in) != vino.snap) { 1124 in = ceph_get_inode(sb, vino); 1125 if (IS_ERR(in)) { 1126 err = PTR_ERR(in); 1127 goto done; 1128 } 1129 } 1130 req->r_target_inode = in; 1131 1132 err = fill_inode(in, 1133 &rinfo->targeti, NULL, 1134 session, req->r_request_started, 1135 (le32_to_cpu(rinfo->head->result) == 0) ? 1136 req->r_fmode : -1, 1137 &req->r_caps_reservation); 1138 if (err < 0) { 1139 pr_err("fill_inode badness %p %llx.%llx\n", 1140 in, ceph_vinop(in)); 1141 goto done; 1142 } 1143 } 1144 1145 done: 1146 dout("fill_trace done err=%d\n", err); 1147 return err; 1148 } 1149 1150 /* 1151 * Prepopulate our cache with readdir results, leases, etc. 1152 */ 1153 int ceph_readdir_prepopulate(struct ceph_mds_request *req, 1154 struct ceph_mds_session *session) 1155 { 1156 struct dentry *parent = req->r_dentry; 1157 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1158 struct qstr dname; 1159 struct dentry *dn; 1160 struct inode *in; 1161 int err = 0, i; 1162 struct inode *snapdir = NULL; 1163 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; 1164 u64 frag = le32_to_cpu(rhead->args.readdir.frag); 1165 struct ceph_dentry_info *di; 1166 1167 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { 1168 snapdir = ceph_get_snapdir(parent->d_inode); 1169 parent = d_find_alias(snapdir); 1170 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n", 1171 rinfo->dir_nr, parent); 1172 } else { 1173 dout("readdir_prepopulate %d items under dn %p\n", 1174 rinfo->dir_nr, parent); 1175 if (rinfo->dir_dir) 1176 ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); 1177 } 1178 1179 for (i = 0; i < rinfo->dir_nr; i++) { 1180 struct ceph_vino vino; 1181 1182 dname.name = rinfo->dir_dname[i]; 1183 dname.len = rinfo->dir_dname_len[i]; 1184 dname.hash = full_name_hash(dname.name, dname.len); 1185 1186 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino); 1187 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid); 1188 1189 retry_lookup: 1190 dn = d_lookup(parent, &dname); 1191 dout("d_lookup on parent=%p name=%.*s got %p\n", 1192 parent, dname.len, dname.name, dn); 1193 1194 if (!dn) { 1195 dn = d_alloc(parent, &dname); 1196 dout("d_alloc %p '%.*s' = %p\n", parent, 1197 dname.len, dname.name, dn); 1198 if (dn == NULL) { 1199 dout("d_alloc badness\n"); 1200 err = -ENOMEM; 1201 goto out; 1202 } 1203 err = ceph_init_dentry(dn); 1204 if (err < 0) { 1205 dput(dn); 1206 goto out; 1207 } 1208 } else if (dn->d_inode && 1209 (ceph_ino(dn->d_inode) != vino.ino || 1210 ceph_snap(dn->d_inode) != vino.snap)) { 1211 dout(" dn %p points to wrong inode %p\n", 1212 dn, dn->d_inode); 1213 d_delete(dn); 1214 dput(dn); 1215 goto retry_lookup; 1216 } else { 1217 /* reorder parent's d_subdirs */ 1218 spin_lock(&dcache_lock); 1219 spin_lock(&dn->d_lock); 1220 list_move(&dn->d_u.d_child, &parent->d_subdirs); 1221 spin_unlock(&dn->d_lock); 1222 spin_unlock(&dcache_lock); 1223 } 1224 1225 di = dn->d_fsdata; 1226 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset); 1227 1228 /* inode */ 1229 if (dn->d_inode) { 1230 in = dn->d_inode; 1231 } else { 1232 in = ceph_get_inode(parent->d_sb, vino); 1233 if (IS_ERR(in)) { 1234 dout("new_inode badness\n"); 1235 d_delete(dn); 1236 dput(dn); 1237 err = PTR_ERR(in); 1238 goto out; 1239 } 1240 dn = splice_dentry(dn, in, NULL); 1241 if (IS_ERR(dn)) 1242 dn = NULL; 1243 } 1244 1245 if (fill_inode(in, &rinfo->dir_in[i], NULL, session, 1246 req->r_request_started, -1, 1247 &req->r_caps_reservation) < 0) { 1248 pr_err("fill_inode badness on %p\n", in); 1249 goto next_item; 1250 } 1251 if (dn) 1252 update_dentry_lease(dn, rinfo->dir_dlease[i], 1253 req->r_session, 1254 req->r_request_started); 1255 next_item: 1256 if (dn) 1257 dput(dn); 1258 } 1259 req->r_did_prepopulate = true; 1260 1261 out: 1262 if (snapdir) { 1263 iput(snapdir); 1264 dput(parent); 1265 } 1266 dout("readdir_prepopulate done\n"); 1267 return err; 1268 } 1269 1270 int ceph_inode_set_size(struct inode *inode, loff_t size) 1271 { 1272 struct ceph_inode_info *ci = ceph_inode(inode); 1273 int ret = 0; 1274 1275 spin_lock(&inode->i_lock); 1276 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size); 1277 inode->i_size = size; 1278 inode->i_blocks = (size + (1 << 9) - 1) >> 9; 1279 1280 /* tell the MDS if we are approaching max_size */ 1281 if ((size << 1) >= ci->i_max_size && 1282 (ci->i_reported_size << 1) < ci->i_max_size) 1283 ret = 1; 1284 1285 spin_unlock(&inode->i_lock); 1286 return ret; 1287 } 1288 1289 /* 1290 * Write back inode data in a worker thread. (This can't be done 1291 * in the message handler context.) 1292 */ 1293 void ceph_queue_writeback(struct inode *inode) 1294 { 1295 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1296 &ceph_inode(inode)->i_wb_work)) { 1297 dout("ceph_queue_writeback %p\n", inode); 1298 igrab(inode); 1299 } else { 1300 dout("ceph_queue_writeback %p failed\n", inode); 1301 } 1302 } 1303 1304 static void ceph_writeback_work(struct work_struct *work) 1305 { 1306 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1307 i_wb_work); 1308 struct inode *inode = &ci->vfs_inode; 1309 1310 dout("writeback %p\n", inode); 1311 filemap_fdatawrite(&inode->i_data); 1312 iput(inode); 1313 } 1314 1315 /* 1316 * queue an async invalidation 1317 */ 1318 void ceph_queue_invalidate(struct inode *inode) 1319 { 1320 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1321 &ceph_inode(inode)->i_pg_inv_work)) { 1322 dout("ceph_queue_invalidate %p\n", inode); 1323 igrab(inode); 1324 } else { 1325 dout("ceph_queue_invalidate %p failed\n", inode); 1326 } 1327 } 1328 1329 /* 1330 * invalidate any pages that are not dirty or under writeback. this 1331 * includes pages that are clean and mapped. 1332 */ 1333 static void ceph_invalidate_nondirty_pages(struct address_space *mapping) 1334 { 1335 struct pagevec pvec; 1336 pgoff_t next = 0; 1337 int i; 1338 1339 pagevec_init(&pvec, 0); 1340 while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 1341 for (i = 0; i < pagevec_count(&pvec); i++) { 1342 struct page *page = pvec.pages[i]; 1343 pgoff_t index; 1344 int skip_page = 1345 (PageDirty(page) || PageWriteback(page)); 1346 1347 if (!skip_page) 1348 skip_page = !trylock_page(page); 1349 1350 /* 1351 * We really shouldn't be looking at the ->index of an 1352 * unlocked page. But we're not allowed to lock these 1353 * pages. So we rely upon nobody altering the ->index 1354 * of this (pinned-by-us) page. 1355 */ 1356 index = page->index; 1357 if (index > next) 1358 next = index; 1359 next++; 1360 1361 if (skip_page) 1362 continue; 1363 1364 generic_error_remove_page(mapping, page); 1365 unlock_page(page); 1366 } 1367 pagevec_release(&pvec); 1368 cond_resched(); 1369 } 1370 } 1371 1372 /* 1373 * Invalidate inode pages in a worker thread. (This can't be done 1374 * in the message handler context.) 1375 */ 1376 static void ceph_invalidate_work(struct work_struct *work) 1377 { 1378 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1379 i_pg_inv_work); 1380 struct inode *inode = &ci->vfs_inode; 1381 u32 orig_gen; 1382 int check = 0; 1383 1384 spin_lock(&inode->i_lock); 1385 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1386 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1387 if (ci->i_rdcache_gen == 0 || 1388 ci->i_rdcache_revoking != ci->i_rdcache_gen) { 1389 BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen); 1390 /* nevermind! */ 1391 ci->i_rdcache_revoking = 0; 1392 spin_unlock(&inode->i_lock); 1393 goto out; 1394 } 1395 orig_gen = ci->i_rdcache_gen; 1396 spin_unlock(&inode->i_lock); 1397 1398 ceph_invalidate_nondirty_pages(inode->i_mapping); 1399 1400 spin_lock(&inode->i_lock); 1401 if (orig_gen == ci->i_rdcache_gen) { 1402 dout("invalidate_pages %p gen %d successful\n", inode, 1403 ci->i_rdcache_gen); 1404 ci->i_rdcache_gen = 0; 1405 ci->i_rdcache_revoking = 0; 1406 check = 1; 1407 } else { 1408 dout("invalidate_pages %p gen %d raced, gen now %d\n", 1409 inode, orig_gen, ci->i_rdcache_gen); 1410 } 1411 spin_unlock(&inode->i_lock); 1412 1413 if (check) 1414 ceph_check_caps(ci, 0, NULL); 1415 out: 1416 iput(inode); 1417 } 1418 1419 1420 /* 1421 * called by trunc_wq; take i_mutex ourselves 1422 * 1423 * We also truncate in a separate thread as well. 1424 */ 1425 static void ceph_vmtruncate_work(struct work_struct *work) 1426 { 1427 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1428 i_vmtruncate_work); 1429 struct inode *inode = &ci->vfs_inode; 1430 1431 dout("vmtruncate_work %p\n", inode); 1432 mutex_lock(&inode->i_mutex); 1433 __ceph_do_pending_vmtruncate(inode); 1434 mutex_unlock(&inode->i_mutex); 1435 iput(inode); 1436 } 1437 1438 /* 1439 * Queue an async vmtruncate. If we fail to queue work, we will handle 1440 * the truncation the next time we call __ceph_do_pending_vmtruncate. 1441 */ 1442 void ceph_queue_vmtruncate(struct inode *inode) 1443 { 1444 struct ceph_inode_info *ci = ceph_inode(inode); 1445 1446 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, 1447 &ci->i_vmtruncate_work)) { 1448 dout("ceph_queue_vmtruncate %p\n", inode); 1449 igrab(inode); 1450 } else { 1451 dout("ceph_queue_vmtruncate %p failed, pending=%d\n", 1452 inode, ci->i_truncate_pending); 1453 } 1454 } 1455 1456 /* 1457 * called with i_mutex held. 1458 * 1459 * Make sure any pending truncation is applied before doing anything 1460 * that may depend on it. 1461 */ 1462 void __ceph_do_pending_vmtruncate(struct inode *inode) 1463 { 1464 struct ceph_inode_info *ci = ceph_inode(inode); 1465 u64 to; 1466 int wrbuffer_refs, wake = 0; 1467 1468 retry: 1469 spin_lock(&inode->i_lock); 1470 if (ci->i_truncate_pending == 0) { 1471 dout("__do_pending_vmtruncate %p none pending\n", inode); 1472 spin_unlock(&inode->i_lock); 1473 return; 1474 } 1475 1476 /* 1477 * make sure any dirty snapped pages are flushed before we 1478 * possibly truncate them.. so write AND block! 1479 */ 1480 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { 1481 dout("__do_pending_vmtruncate %p flushing snaps first\n", 1482 inode); 1483 spin_unlock(&inode->i_lock); 1484 filemap_write_and_wait_range(&inode->i_data, 0, 1485 inode->i_sb->s_maxbytes); 1486 goto retry; 1487 } 1488 1489 to = ci->i_truncate_size; 1490 wrbuffer_refs = ci->i_wrbuffer_ref; 1491 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode, 1492 ci->i_truncate_pending, to); 1493 spin_unlock(&inode->i_lock); 1494 1495 truncate_inode_pages(inode->i_mapping, to); 1496 1497 spin_lock(&inode->i_lock); 1498 ci->i_truncate_pending--; 1499 if (ci->i_truncate_pending == 0) 1500 wake = 1; 1501 spin_unlock(&inode->i_lock); 1502 1503 if (wrbuffer_refs == 0) 1504 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 1505 if (wake) 1506 wake_up_all(&ci->i_cap_wq); 1507 } 1508 1509 1510 /* 1511 * symlinks 1512 */ 1513 static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd) 1514 { 1515 struct ceph_inode_info *ci = ceph_inode(dentry->d_inode); 1516 nd_set_link(nd, ci->i_symlink); 1517 return NULL; 1518 } 1519 1520 static const struct inode_operations ceph_symlink_iops = { 1521 .readlink = generic_readlink, 1522 .follow_link = ceph_sym_follow_link, 1523 }; 1524 1525 /* 1526 * setattr 1527 */ 1528 int ceph_setattr(struct dentry *dentry, struct iattr *attr) 1529 { 1530 struct inode *inode = dentry->d_inode; 1531 struct ceph_inode_info *ci = ceph_inode(inode); 1532 struct inode *parent_inode = dentry->d_parent->d_inode; 1533 const unsigned int ia_valid = attr->ia_valid; 1534 struct ceph_mds_request *req; 1535 struct ceph_mds_client *mdsc = &ceph_sb_to_client(dentry->d_sb)->mdsc; 1536 int issued; 1537 int release = 0, dirtied = 0; 1538 int mask = 0; 1539 int err = 0; 1540 1541 if (ceph_snap(inode) != CEPH_NOSNAP) 1542 return -EROFS; 1543 1544 __ceph_do_pending_vmtruncate(inode); 1545 1546 err = inode_change_ok(inode, attr); 1547 if (err != 0) 1548 return err; 1549 1550 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR, 1551 USE_AUTH_MDS); 1552 if (IS_ERR(req)) 1553 return PTR_ERR(req); 1554 1555 spin_lock(&inode->i_lock); 1556 issued = __ceph_caps_issued(ci, NULL); 1557 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); 1558 1559 if (ia_valid & ATTR_UID) { 1560 dout("setattr %p uid %d -> %d\n", inode, 1561 inode->i_uid, attr->ia_uid); 1562 if (issued & CEPH_CAP_AUTH_EXCL) { 1563 inode->i_uid = attr->ia_uid; 1564 dirtied |= CEPH_CAP_AUTH_EXCL; 1565 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1566 attr->ia_uid != inode->i_uid) { 1567 req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid); 1568 mask |= CEPH_SETATTR_UID; 1569 release |= CEPH_CAP_AUTH_SHARED; 1570 } 1571 } 1572 if (ia_valid & ATTR_GID) { 1573 dout("setattr %p gid %d -> %d\n", inode, 1574 inode->i_gid, attr->ia_gid); 1575 if (issued & CEPH_CAP_AUTH_EXCL) { 1576 inode->i_gid = attr->ia_gid; 1577 dirtied |= CEPH_CAP_AUTH_EXCL; 1578 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1579 attr->ia_gid != inode->i_gid) { 1580 req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid); 1581 mask |= CEPH_SETATTR_GID; 1582 release |= CEPH_CAP_AUTH_SHARED; 1583 } 1584 } 1585 if (ia_valid & ATTR_MODE) { 1586 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode, 1587 attr->ia_mode); 1588 if (issued & CEPH_CAP_AUTH_EXCL) { 1589 inode->i_mode = attr->ia_mode; 1590 dirtied |= CEPH_CAP_AUTH_EXCL; 1591 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1592 attr->ia_mode != inode->i_mode) { 1593 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode); 1594 mask |= CEPH_SETATTR_MODE; 1595 release |= CEPH_CAP_AUTH_SHARED; 1596 } 1597 } 1598 1599 if (ia_valid & ATTR_ATIME) { 1600 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode, 1601 inode->i_atime.tv_sec, inode->i_atime.tv_nsec, 1602 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec); 1603 if (issued & CEPH_CAP_FILE_EXCL) { 1604 ci->i_time_warp_seq++; 1605 inode->i_atime = attr->ia_atime; 1606 dirtied |= CEPH_CAP_FILE_EXCL; 1607 } else if ((issued & CEPH_CAP_FILE_WR) && 1608 timespec_compare(&inode->i_atime, 1609 &attr->ia_atime) < 0) { 1610 inode->i_atime = attr->ia_atime; 1611 dirtied |= CEPH_CAP_FILE_WR; 1612 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1613 !timespec_equal(&inode->i_atime, &attr->ia_atime)) { 1614 ceph_encode_timespec(&req->r_args.setattr.atime, 1615 &attr->ia_atime); 1616 mask |= CEPH_SETATTR_ATIME; 1617 release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | 1618 CEPH_CAP_FILE_WR; 1619 } 1620 } 1621 if (ia_valid & ATTR_MTIME) { 1622 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode, 1623 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 1624 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec); 1625 if (issued & CEPH_CAP_FILE_EXCL) { 1626 ci->i_time_warp_seq++; 1627 inode->i_mtime = attr->ia_mtime; 1628 dirtied |= CEPH_CAP_FILE_EXCL; 1629 } else if ((issued & CEPH_CAP_FILE_WR) && 1630 timespec_compare(&inode->i_mtime, 1631 &attr->ia_mtime) < 0) { 1632 inode->i_mtime = attr->ia_mtime; 1633 dirtied |= CEPH_CAP_FILE_WR; 1634 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1635 !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) { 1636 ceph_encode_timespec(&req->r_args.setattr.mtime, 1637 &attr->ia_mtime); 1638 mask |= CEPH_SETATTR_MTIME; 1639 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 1640 CEPH_CAP_FILE_WR; 1641 } 1642 } 1643 if (ia_valid & ATTR_SIZE) { 1644 dout("setattr %p size %lld -> %lld\n", inode, 1645 inode->i_size, attr->ia_size); 1646 if (attr->ia_size > inode->i_sb->s_maxbytes) { 1647 err = -EINVAL; 1648 goto out; 1649 } 1650 if ((issued & CEPH_CAP_FILE_EXCL) && 1651 attr->ia_size > inode->i_size) { 1652 inode->i_size = attr->ia_size; 1653 inode->i_blocks = 1654 (attr->ia_size + (1 << 9) - 1) >> 9; 1655 inode->i_ctime = attr->ia_ctime; 1656 ci->i_reported_size = attr->ia_size; 1657 dirtied |= CEPH_CAP_FILE_EXCL; 1658 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1659 attr->ia_size != inode->i_size) { 1660 req->r_args.setattr.size = cpu_to_le64(attr->ia_size); 1661 req->r_args.setattr.old_size = 1662 cpu_to_le64(inode->i_size); 1663 mask |= CEPH_SETATTR_SIZE; 1664 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 1665 CEPH_CAP_FILE_WR; 1666 } 1667 } 1668 1669 /* these do nothing */ 1670 if (ia_valid & ATTR_CTIME) { 1671 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME| 1672 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0; 1673 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode, 1674 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 1675 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 1676 only ? "ctime only" : "ignored"); 1677 inode->i_ctime = attr->ia_ctime; 1678 if (only) { 1679 /* 1680 * if kernel wants to dirty ctime but nothing else, 1681 * we need to choose a cap to dirty under, or do 1682 * a almost-no-op setattr 1683 */ 1684 if (issued & CEPH_CAP_AUTH_EXCL) 1685 dirtied |= CEPH_CAP_AUTH_EXCL; 1686 else if (issued & CEPH_CAP_FILE_EXCL) 1687 dirtied |= CEPH_CAP_FILE_EXCL; 1688 else if (issued & CEPH_CAP_XATTR_EXCL) 1689 dirtied |= CEPH_CAP_XATTR_EXCL; 1690 else 1691 mask |= CEPH_SETATTR_CTIME; 1692 } 1693 } 1694 if (ia_valid & ATTR_FILE) 1695 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 1696 1697 if (dirtied) { 1698 __ceph_mark_dirty_caps(ci, dirtied); 1699 inode->i_ctime = CURRENT_TIME; 1700 } 1701 1702 release &= issued; 1703 spin_unlock(&inode->i_lock); 1704 1705 if (mask) { 1706 req->r_inode = igrab(inode); 1707 req->r_inode_drop = release; 1708 req->r_args.setattr.mask = cpu_to_le32(mask); 1709 req->r_num_caps = 1; 1710 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 1711 } 1712 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 1713 ceph_cap_string(dirtied), mask); 1714 1715 ceph_mdsc_put_request(req); 1716 __ceph_do_pending_vmtruncate(inode); 1717 return err; 1718 out: 1719 spin_unlock(&inode->i_lock); 1720 ceph_mdsc_put_request(req); 1721 return err; 1722 } 1723 1724 /* 1725 * Verify that we have a lease on the given mask. If not, 1726 * do a getattr against an mds. 1727 */ 1728 int ceph_do_getattr(struct inode *inode, int mask) 1729 { 1730 struct ceph_client *client = ceph_sb_to_client(inode->i_sb); 1731 struct ceph_mds_client *mdsc = &client->mdsc; 1732 struct ceph_mds_request *req; 1733 int err; 1734 1735 if (ceph_snap(inode) == CEPH_SNAPDIR) { 1736 dout("do_getattr inode %p SNAPDIR\n", inode); 1737 return 0; 1738 } 1739 1740 dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask)); 1741 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 1742 return 0; 1743 1744 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1745 if (IS_ERR(req)) 1746 return PTR_ERR(req); 1747 req->r_inode = igrab(inode); 1748 req->r_num_caps = 1; 1749 req->r_args.getattr.mask = cpu_to_le32(mask); 1750 err = ceph_mdsc_do_request(mdsc, NULL, req); 1751 ceph_mdsc_put_request(req); 1752 dout("do_getattr result=%d\n", err); 1753 return err; 1754 } 1755 1756 1757 /* 1758 * Check inode permissions. We verify we have a valid value for 1759 * the AUTH cap, then call the generic handler. 1760 */ 1761 int ceph_permission(struct inode *inode, int mask) 1762 { 1763 int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1764 1765 if (!err) 1766 err = generic_permission(inode, mask, NULL); 1767 return err; 1768 } 1769 1770 /* 1771 * Get all attributes. Hopefully somedata we'll have a statlite() 1772 * and can limit the fields we require to be accurate. 1773 */ 1774 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 1775 struct kstat *stat) 1776 { 1777 struct inode *inode = dentry->d_inode; 1778 struct ceph_inode_info *ci = ceph_inode(inode); 1779 int err; 1780 1781 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL); 1782 if (!err) { 1783 generic_fillattr(inode, stat); 1784 stat->ino = inode->i_ino; 1785 if (ceph_snap(inode) != CEPH_NOSNAP) 1786 stat->dev = ceph_snap(inode); 1787 else 1788 stat->dev = 0; 1789 if (S_ISDIR(inode->i_mode)) { 1790 stat->size = ci->i_rbytes; 1791 stat->blocks = 0; 1792 stat->blksize = 65536; 1793 } 1794 } 1795 return err; 1796 } 1797