#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
        struct inode *inode;
        ino_t t = ceph_vino_to_ino(vino);

        inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
        if (inode == NULL)
                return ERR_PTR(-ENOMEM);
        if (inode->i_state & I_NEW) {
                dout("get_inode created new inode %p %llx.%llx ino %llx\n",
                     inode, ceph_vinop(inode), (u64)inode->i_ino);
                unlock_new_inode(inode);
        }

        dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
             vino.snap, inode);
        return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
        struct ceph_vino vino = {
                .ino = ceph_ino(parent),
                .snap = CEPH_SNAPDIR,
        };
        struct inode *inode = ceph_get_inode(parent->i_sb, vino);
        struct ceph_inode_info *ci = ceph_inode(inode);

        BUG_ON(!S_ISDIR(parent->i_mode));
        if (IS_ERR(inode))
                return inode;
        inode->i_mode = parent->i_mode;
        inode->i_uid = parent->i_uid;
        inode->i_gid = parent->i_gid;
        inode->i_op = &ceph_dir_iops;
        inode->i_fop = &ceph_dir_fops;
        ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
        ci->i_rbytes = 0;
        return inode;
}

const struct inode_operations ceph_file_iops = {
        .permission = ceph_permission,
        .setattr = ceph_setattr,
        .getattr = ceph_getattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
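/*
 * Frag ids pack a (bits, value) pair into a u32: the top 8 bits give
 * the mask length, the low 24 bits hold the most significant value
 * bits.  A minimal sketch of how values map to child frags, assuming
 * the helpers in include/linux/ceph/ceph_frag.h (illustration only,
 * not built):
 */
#if 0
static void frag_encoding_example(void)
{
        u32 root = ceph_frag_make(0, 0);              /* the whole space */
        u32 left = ceph_frag_make_child(root, 1, 0);  /* 0x01000000 */
        u32 right = ceph_frag_make_child(root, 1, 1); /* 0x01800000 */

        /* a dentry hash with its top bit set falls in the right child */
        BUG_ON(!ceph_frag_contains_value(right, 0x800001));
        BUG_ON(ceph_frag_contains_value(left, 0x800001));
}
#endif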
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
                                                    u32 f)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_frag *frag;
        int c;

        p = &ci->i_fragtree.rb_node;
        while (*p) {
                parent = *p;
                frag = rb_entry(parent, struct ceph_inode_frag, node);
                c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else
                        return frag;
        }

        frag = kmalloc(sizeof(*frag), GFP_NOFS);
        if (!frag) {
                pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
                       "frag %x\n", &ci->vfs_inode,
                       ceph_vinop(&ci->vfs_inode), f);
                return ERR_PTR(-ENOMEM);
        }
        frag->frag = f;
        frag->split_by = 0;
        frag->mds = -1;
        frag->ndist = 0;

        rb_link_node(&frag->node, parent, p);
        rb_insert_color(&frag->node, &ci->i_fragtree);

        dout("get_or_create_frag added %llx.%llx frag %x\n",
             ceph_vinop(&ci->vfs_inode), f);
        return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
        struct rb_node *n = ci->i_fragtree.rb_node;

        while (n) {
                struct ceph_inode_frag *frag =
                        rb_entry(n, struct ceph_inode_frag, node);
                int c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        n = n->rb_left;
                else if (c > 0)
                        n = n->rb_right;
                else
                        return frag;
        }
        return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                     struct ceph_inode_frag *pfrag,
                     int *found)
{
        u32 t = ceph_frag_make(0, 0);
        struct ceph_inode_frag *frag;
        unsigned nway, i;
        u32 n;

        if (found)
                *found = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        while (1) {
                WARN_ON(!ceph_frag_contains_value(t, v));
                frag = __ceph_find_frag(ci, t);
                if (!frag)
                        break; /* t is a leaf */
                if (frag->split_by == 0) {
                        if (pfrag)
                                memcpy(pfrag, frag, sizeof(*pfrag));
                        if (found)
                                *found = 1;
                        break;
                }

                /* choose child */
                nway = 1 << frag->split_by;
                dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
                     frag->split_by, nway);
                for (i = 0; i < nway; i++) {
                        n = ceph_frag_make_child(t, frag->split_by, i);
                        if (ceph_frag_contains_value(n, v)) {
                                t = n;
                                break;
                        }
                }
                BUG_ON(i == nway);
        }
        dout("choose_frag(%x) = %x\n", v, t);

        mutex_unlock(&ci->i_fragtree_mutex);
        return t;
}
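/*
 * A minimal sketch of how a caller resolves a dentry hash to a leaf
 * frag and, when delegation info was recorded, to an MDS (the helper
 * name is hypothetical; illustration only, not built):
 */
#if 0
static int frag_to_mds_example(struct ceph_inode_info *ci, u32 hash)
{
        struct ceph_inode_frag frag;
        int found;

        ceph_choose_frag(ci, hash, &frag, &found);
        /* with delegation info present, requests can go to frag.mds */
        return found ? frag.mds : -1;
}
#endif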
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
                             struct ceph_mds_reply_dirfrag *dirinfo)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        u32 id = le32_to_cpu(dirinfo->frag);
        int mds = le32_to_cpu(dirinfo->auth);
        int ndist = le32_to_cpu(dirinfo->ndist);
        int i;
        int err = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        if (ndist == 0) {
                /* no delegation info needed. */
                frag = __ceph_find_frag(ci, id);
                if (!frag)
                        goto out;
                if (frag->split_by == 0) {
                        /* tree leaf, remove */
                        dout("fill_dirfrag removed %llx.%llx frag %x"
                             " (no ref)\n", ceph_vinop(inode), id);
                        rb_erase(&frag->node, &ci->i_fragtree);
                        kfree(frag);
                } else {
                        /* tree branch, keep and clear */
                        dout("fill_dirfrag cleared %llx.%llx frag %x"
                             " referral\n", ceph_vinop(inode), id);
                        frag->mds = -1;
                        frag->ndist = 0;
                }
                goto out;
        }


        /* find/add this frag to store mds delegation info */
        frag = __get_or_create_frag(ci, id);
        if (IS_ERR(frag)) {
                /* this is not the end of the world; we can continue
                   with bad/inaccurate delegation info */
                pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
                       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
                err = -ENOMEM;
                goto out;
        }

        frag->mds = mds;
        frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
        for (i = 0; i < frag->ndist; i++)
                frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
        dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
             ceph_vinop(inode), frag->frag, frag->ndist);

out:
        mutex_unlock(&ci->i_fragtree_mutex);
        return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
        struct ceph_inode_info *ci;
        int i;

        ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
        if (!ci)
                return NULL;

        dout("alloc_inode %p\n", &ci->vfs_inode);

        ci->i_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
        ci->i_release_count = 0;
        ci->i_symlink = NULL;

        ci->i_fragtree = RB_ROOT;
        mutex_init(&ci->i_fragtree_mutex);

        ci->i_xattrs.blob = NULL;
        ci->i_xattrs.prealloc_blob = NULL;
        ci->i_xattrs.dirty = false;
        ci->i_xattrs.index = RB_ROOT;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.version = 0;
        ci->i_xattrs.index_version = 0;

        ci->i_caps = RB_ROOT;
        ci->i_auth_cap = NULL;
        ci->i_dirty_caps = 0;
        ci->i_flushing_caps = 0;
        INIT_LIST_HEAD(&ci->i_dirty_item);
        INIT_LIST_HEAD(&ci->i_flushing_item);
        ci->i_cap_flush_seq = 0;
        ci->i_cap_flush_last_tid = 0;
        memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
        init_waitqueue_head(&ci->i_cap_wq);
        ci->i_hold_caps_min = 0;
        ci->i_hold_caps_max = 0;
        INIT_LIST_HEAD(&ci->i_cap_delay_list);
        ci->i_cap_exporting_mds = 0;
        ci->i_cap_exporting_mseq = 0;
        ci->i_cap_exporting_issued = 0;
        INIT_LIST_HEAD(&ci->i_cap_snaps);
        ci->i_head_snapc = NULL;
        ci->i_snap_caps = 0;

        for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
                ci->i_nr_by_mode[i] = 0;

        ci->i_truncate_seq = 0;
        ci->i_truncate_size = 0;
        ci->i_truncate_pending = 0;

        ci->i_max_size = 0;
        ci->i_reported_size = 0;
        ci->i_wanted_max_size = 0;
        ci->i_requested_max_size = 0;

        ci->i_pin_ref = 0;
        ci->i_rd_ref = 0;
        ci->i_rdcache_ref = 0;
        ci->i_wr_ref = 0;
        ci->i_wrbuffer_ref = 0;
        ci->i_wrbuffer_ref_head = 0;
        ci->i_shared_gen = 0;
        ci->i_rdcache_gen = 0;
        ci->i_rdcache_revoking = 0;

        INIT_LIST_HEAD(&ci->i_unsafe_writes);
        INIT_LIST_HEAD(&ci->i_unsafe_dirops);
        spin_lock_init(&ci->i_unsafe_lock);

        ci->i_snap_realm = NULL;
        INIT_LIST_HEAD(&ci->i_snap_realm_item);
        INIT_LIST_HEAD(&ci->i_snap_flush_item);

        INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
        INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
        INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

        return &ci->vfs_inode;
}
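/*
 * ceph_alloc_inode() and ceph_destroy_inode() below are wired up as
 * the filesystem's inode lifecycle hooks.  A sketch of the relevant
 * super_operations entries (see fs/ceph/super.c; abbreviated):
 */
#if 0
static const struct super_operations ceph_super_ops_sketch = {
        .alloc_inode    = ceph_alloc_inode,
        .destroy_inode  = ceph_destroy_inode,
};
#endif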
void ceph_destroy_inode(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        struct rb_node *n;

        dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

        ceph_queue_caps_release(inode);

        /*
         * we may still have a snap_realm reference if there are stray
         * caps in i_cap_exporting_issued or i_snap_caps.
         */
        if (ci->i_snap_realm) {
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
                struct ceph_snap_realm *realm = ci->i_snap_realm;

                dout(" dropping residual ref to snap realm %p\n", realm);
                spin_lock(&realm->inodes_with_caps_lock);
                list_del_init(&ci->i_snap_realm_item);
                spin_unlock(&realm->inodes_with_caps_lock);
                ceph_put_snap_realm(mdsc, realm);
        }

        kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
                kfree(frag);
        }

        __ceph_destroy_xattrs(ci);
        if (ci->i_xattrs.blob)
                ceph_buffer_put(ci->i_xattrs.blob);
        if (ci->i_xattrs.prealloc_blob)
                ceph_buffer_put(ci->i_xattrs.prealloc_blob);

        kmem_cache_free(ceph_inode_cachep, ci);
}


/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
                        u32 truncate_seq, u64 truncate_size, u64 size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int queue_trunc = 0;

        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
            (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
                dout("size %lld -> %llu\n", inode->i_size, size);
                inode->i_size = size;
                inode->i_blocks = (size + (1<<9) - 1) >> 9;
                ci->i_reported_size = size;
                if (truncate_seq != ci->i_truncate_seq) {
                        dout("truncate_seq %u -> %u\n",
                             ci->i_truncate_seq, truncate_seq);
                        ci->i_truncate_seq = truncate_seq;
                        /*
                         * If we hold relevant caps, or in the case where we're
                         * not the only client referencing this file and we
                         * don't hold those caps, then we need to check whether
                         * the file is either opened or mmaped
                         */
                        if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
                                       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
                                       CEPH_CAP_FILE_EXCL|
                                       CEPH_CAP_FILE_LAZYIO)) ||
                            mapping_mapped(inode->i_mapping) ||
                            __ceph_caps_file_wanted(ci)) {
                                ci->i_truncate_pending++;
                                queue_trunc = 1;
                        }
                }
        }
        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
            ci->i_truncate_size != truncate_size) {
                dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
                     truncate_size);
                ci->i_truncate_size = truncate_size;
        }
        return queue_trunc;
}
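/*
 * truncate_seq (like time_warp_seq) is compared with ceph_seq_cmp(),
 * which uses a signed 32-bit difference so ordering survives
 * wraparound.  Minimal sketch, assuming ceph_seq_cmp() from
 * include/linux/ceph/ceph_fs.h (illustration only, not built):
 */
#if 0
static void seq_cmp_example(void)
{
        BUG_ON(ceph_seq_cmp(2, 1) <= 0);        /* 2 is newer than 1 */
        /* 0 is newer than 0xffffffff: the seq has just wrapped */
        BUG_ON(ceph_seq_cmp(0, 0xffffffff) <= 0);
}
#endif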
void ceph_fill_file_time(struct inode *inode, int issued,
                         u64 time_warp_seq, struct timespec *ctime,
                         struct timespec *mtime, struct timespec *atime)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int warn = 0;

        if (issued & (CEPH_CAP_FILE_EXCL|
                      CEPH_CAP_FILE_WR|
                      CEPH_CAP_FILE_BUFFER)) {
                if (timespec_compare(ctime, &inode->i_ctime) > 0) {
                        dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
                             inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
                             ctime->tv_sec, ctime->tv_nsec);
                        inode->i_ctime = *ctime;
                }
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
                        /* the MDS did a utimes() */
                        dout("mtime %ld.%09ld -> %ld.%09ld "
                             "tw %d -> %d\n",
                             inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
                             mtime->tv_sec, mtime->tv_nsec,
                             ci->i_time_warp_seq, (int)time_warp_seq);

                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else if (time_warp_seq == ci->i_time_warp_seq) {
                        /* nobody did utimes(); take the max */
                        if (timespec_compare(mtime, &inode->i_mtime) > 0) {
                                dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_mtime.tv_sec,
                                     inode->i_mtime.tv_nsec,
                                     mtime->tv_sec, mtime->tv_nsec);
                                inode->i_mtime = *mtime;
                        }
                        if (timespec_compare(atime, &inode->i_atime) > 0) {
                                dout("atime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_atime.tv_sec,
                                     inode->i_atime.tv_nsec,
                                     atime->tv_sec, atime->tv_nsec);
                                inode->i_atime = *atime;
                        }
                } else if (issued & CEPH_CAP_FILE_EXCL) {
                        /* we did a utimes(); ignore mds values */
                } else {
                        warn = 1;
                }
        } else {
                /* we have no write caps; whatever the MDS says is true */
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
                        inode->i_ctime = *ctime;
                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else {
                        warn = 1;
                }
        }
        if (warn) /* time_warp_seq shouldn't go backwards */
                dout("%p mds time_warp_seq %llu < %u\n",
                     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
                      struct ceph_mds_reply_info_in *iinfo,
                      struct ceph_mds_reply_dirfrag *dirinfo,
                      struct ceph_mds_session *session,
                      unsigned long ttl_from, int cap_fmode,
                      struct ceph_cap_reservation *caps_reservation)
{
        struct ceph_mds_reply_inode *info = iinfo->in;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int i;
        int issued, implemented;
        struct timespec mtime, atime, ctime;
        u32 nsplits;
        struct ceph_buffer *xattr_blob = NULL;
        int err = 0;
        int queue_trunc = 0;

        dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
             inode, ceph_vinop(inode), le64_to_cpu(info->version),
             ci->i_version);

        /*
         * prealloc xattr data, if it looks like we'll need it.  only
         * if len > 4 (meaning there are actually xattrs; the first 4
         * bytes are the xattr count).
         */
        if (iinfo->xattr_len > 4) {
                xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
                if (!xattr_blob)
                        pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
                               iinfo->xattr_len);
        }

        spin_lock(&inode->i_lock);

        /*
         * provided version will be odd if inode value is projected,
         * even if stable.  skip the update if we have newer info
         * (e.g., due to inode info racing from multiple MDSs), or if
         * we are getting projected (unstable) inode info.
         */
        if (le64_to_cpu(info->version) > 0 &&
            (ci->i_version & ~1) > le64_to_cpu(info->version))
                goto no_change;

        issued = __ceph_caps_issued(ci, &implemented);
        issued |= implemented | __ceph_caps_dirty(ci);

        /* update inode */
        ci->i_version = le64_to_cpu(info->version);
        inode->i_version++;
        inode->i_rdev = le32_to_cpu(info->rdev);

        if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
                inode->i_mode = le32_to_cpu(info->mode);
                inode->i_uid = le32_to_cpu(info->uid);
                inode->i_gid = le32_to_cpu(info->gid);
                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
                     inode->i_uid, inode->i_gid);
        }

        if ((issued & CEPH_CAP_LINK_EXCL) == 0)
                inode->i_nlink = le32_to_cpu(info->nlink);

        /* be careful with mtime, atime, size */
        ceph_decode_timespec(&atime, &info->atime);
        ceph_decode_timespec(&mtime, &info->mtime);
        ceph_decode_timespec(&ctime, &info->ctime);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          le32_to_cpu(info->truncate_seq),
                                          le64_to_cpu(info->truncate_size),
                                          le64_to_cpu(info->size));
        ceph_fill_file_time(inode, issued,
                            le32_to_cpu(info->time_warp_seq),
                            &ctime, &mtime, &atime);

        ci->i_max_size = le64_to_cpu(info->max_size);
        ci->i_layout = info->layout;
        inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

        /* xattrs */
        /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
            le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);
                ci->i_xattrs.blob = xattr_blob;
                if (xattr_blob)
                        memcpy(ci->i_xattrs.blob->vec.iov_base,
                               iinfo->xattr_data, iinfo->xattr_len);
                ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
                xattr_blob = NULL;
        }

        inode->i_mapping->a_ops = &ceph_aops;
        inode->i_mapping->backing_dev_info =
                &ceph_sb_to_client(inode->i_sb)->backing_dev_info;

        switch (inode->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFBLK:
        case S_IFCHR:
        case S_IFSOCK:
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                inode->i_op = &ceph_file_iops;
                break;
        case S_IFREG:
                inode->i_op = &ceph_file_iops;
                inode->i_fop = &ceph_file_fops;
                break;
        case S_IFLNK:
                inode->i_op = &ceph_symlink_iops;
                if (!ci->i_symlink) {
                        int symlen = iinfo->symlink_len;
                        char *sym;

                        BUG_ON(symlen != inode->i_size);
                        spin_unlock(&inode->i_lock);

                        err = -ENOMEM;
                        sym = kmalloc(symlen+1, GFP_NOFS);
                        if (!sym)
                                goto out;
                        memcpy(sym, iinfo->symlink, symlen);
                        sym[symlen] = 0;

                        spin_lock(&inode->i_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
                                kfree(sym); /* lost a race */
                }
                break;
        case S_IFDIR:
                inode->i_op = &ceph_dir_iops;
                inode->i_fop = &ceph_dir_fops;

                ci->i_files = le64_to_cpu(info->files);
                ci->i_subdirs = le64_to_cpu(info->subdirs);
                ci->i_rbytes = le64_to_cpu(info->rbytes);
                ci->i_rfiles = le64_to_cpu(info->rfiles);
                ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
                ceph_decode_timespec(&ci->i_rctime, &info->rctime);

                /* set dir completion flag? */
                if (ci->i_files == 0 && ci->i_subdirs == 0 &&
                    ceph_snap(inode) == CEPH_NOSNAP &&
                    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
                    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                        dout(" marking %p complete (empty)\n", inode);
                        ci->i_ceph_flags |= CEPH_I_COMPLETE;
                        ci->i_max_offset = 2;
                }

                /* it may be better to set st_size in getattr instead? */
                if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
                        inode->i_size = ci->i_rbytes;
                break;
        default:
                pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
                       ceph_vinop(inode), inode->i_mode);
        }

no_change:
        spin_unlock(&inode->i_lock);

        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);

        /* populate frag tree */
        /* FIXME: move me up, if/when version reflects fragtree changes */
        nsplits = le32_to_cpu(info->fragtree.nsplits);
        mutex_lock(&ci->i_fragtree_mutex);
        for (i = 0; i < nsplits; i++) {
                u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
                struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

                if (IS_ERR(frag))
                        continue;
                frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
                dout(" frag %x split by %d\n", frag->frag, frag->split_by);
        }
        mutex_unlock(&ci->i_fragtree_mutex);

        /* were we issued a capability? */
        if (info->cap.caps) {
                if (ceph_snap(inode) == CEPH_NOSNAP) {
                        ceph_add_cap(inode, session,
                                     le64_to_cpu(info->cap.cap_id),
                                     cap_fmode,
                                     le32_to_cpu(info->cap.caps),
                                     le32_to_cpu(info->cap.wanted),
                                     le32_to_cpu(info->cap.seq),
                                     le32_to_cpu(info->cap.mseq),
                                     le64_to_cpu(info->cap.realm),
                                     info->cap.flags,
                                     caps_reservation);
                } else {
                        spin_lock(&inode->i_lock);
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
                        spin_unlock(&inode->i_lock);
                }
        } else if (cap_fmode >= 0) {
                pr_warning("mds issued no caps on %llx.%llx\n",
                           ceph_vinop(inode));
                __ceph_get_fmode(ci, cap_fmode);
        }

        /* update delegation info? */
        if (dirinfo)
                ceph_fill_dirfrag(inode, dirinfo);

        err = 0;

out:
        if (xattr_blob)
                ceph_buffer_put(xattr_blob);
        return err;
}
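/*
 * The xattr blob handled above is treated as opaque here; it is
 * parsed lazily in fs/ceph/xattr.c.  A minimal sketch of its wire
 * format -- a __le32 count, then a length-prefixed name and value per
 * xattr -- following the decode loop there (illustration only, not
 * built):
 */
#if 0
static void xattr_blob_format_example(void *p, void *end)
{
        u32 numattr, len;

        ceph_decode_32_safe(&p, end, numattr, bad);
        while (numattr--) {
                ceph_decode_32_safe(&p, end, len, bad); /* name length */
                ceph_decode_need(&p, end, len, bad);
                p += len;                               /* name bytes */
                ceph_decode_32_safe(&p, end, len, bad); /* value length */
                ceph_decode_need(&p, end, len, bad);
                p += len;                               /* value bytes */
        }
bad:
        return;
}
#endif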
/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
                                struct ceph_mds_reply_lease *lease,
                                struct ceph_mds_session *session,
                                unsigned long from_time)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        long unsigned duration = le32_to_cpu(lease->duration_ms);
        long unsigned ttl = from_time + (duration * HZ) / 1000;
        long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
        struct inode *dir;

        /* only track leases on regular dentries */
        if (dentry->d_op != &ceph_dentry_ops)
                return;

        spin_lock(&dentry->d_lock);
        dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
             dentry, le16_to_cpu(lease->mask), duration, ttl);

        /* make lease_rdcache_gen match directory */
        dir = dentry->d_parent->d_inode;
        di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

        if (lease->mask == 0)
                goto out_unlock;

        if (di->lease_gen == session->s_cap_gen &&
            time_before(ttl, dentry->d_time))
                goto out_unlock;  /* we already have a newer lease. */

        if (di->lease_session && di->lease_session != session)
                goto out_unlock;

        ceph_dentry_lru_touch(dentry);

        if (!di->lease_session)
                di->lease_session = ceph_get_mds_session(session);
        di->lease_gen = session->s_cap_gen;
        di->lease_seq = le32_to_cpu(lease->seq);
        di->lease_renew_after = half_ttl;
        di->lease_renew_from = 0;
        dentry->d_time = ttl;
out_unlock:
        spin_unlock(&dentry->d_lock);
        return;
}
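/*
 * Worked example of the ttl arithmetic above (illustration only, not
 * built): an MDS lease of 30000 ms granted when jiffies was J expires
 * at J + 30*HZ, and renewal starts at the half-way point, J + 15*HZ.
 */
#if 0
static void lease_ttl_example(unsigned long from_time)
{
        unsigned long duration_ms = 30000;      /* hypothetical grant */
        unsigned long ttl = from_time + (duration_ms * HZ) / 1000;
        unsigned long half_ttl = from_time + (duration_ms * HZ / 2) / 1000;

        BUG_ON(ttl != from_time + 30 * HZ);
        BUG_ON(half_ttl != from_time + 15 * HZ);
}
#endif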
/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
        struct dentry *dir = dn->d_parent;
        struct inode *inode = dn->d_parent->d_inode;
        struct ceph_dentry_info *di;

        BUG_ON(!inode);

        di = ceph_dentry(dn);

        spin_lock(&inode->i_lock);
        if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
                spin_unlock(&inode->i_lock);
                return;
        }
        di->offset = ceph_inode(inode)->i_max_offset++;
        spin_unlock(&inode->i_lock);

        spin_lock(&dcache_lock);
        spin_lock(&dn->d_lock);
        list_move(&dn->d_u.d_child, &dir->d_subdirs);
        dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
             dn->d_u.d_child.prev, dn->d_u.d_child.next);
        spin_unlock(&dn->d_lock);
        spin_unlock(&dcache_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
                                    bool *prehash, bool set_offset)
{
        struct dentry *realdn;

        BUG_ON(dn->d_inode);

        /* dn must be unhashed */
        if (!d_unhashed(dn))
                d_drop(dn);
        realdn = d_materialise_unique(dn, in);
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
                       PTR_ERR(realdn), dn, in, ceph_vinop(in));
                if (prehash)
                        *prehash = false; /* don't rehash on error */
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
                dout("dn %p (%d) spliced with %p (%d) "
                     "inode %p ino %llx.%llx\n",
                     dn, atomic_read(&dn->d_count),
                     realdn, atomic_read(&realdn->d_count),
                     realdn->d_inode, ceph_vinop(realdn->d_inode));
                dput(dn);
                dn = realdn;
        } else {
                BUG_ON(!ceph_dentry(dn));
                dout("dn %p attached to %p ino %llx.%llx\n",
                     dn, dn->d_inode, ceph_vinop(dn->d_inode));
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
        if (set_offset)
                ceph_set_dentry_offset(dn);
out:
        return dn;
}
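/*
 * d_materialise_unique() return contract assumed above, as a minimal
 * sketch (illustration only, not built): NULL means @dn itself was
 * bound to the inode; a non-NULL, non-error dentry is an existing
 * alias that won, and the caller's reference to @dn must be dropped.
 */
#if 0
static struct dentry *materialise_contract_example(struct dentry *dn,
                                                   struct inode *in)
{
        struct dentry *realdn = d_materialise_unique(dn, in);

        if (IS_ERR(realdn))
                return realdn;          /* error; dn left alone */
        if (realdn) {
                dput(dn);               /* an existing alias won */
                return realdn;
        }
        return dn;                      /* dn now refers to in */
}
#endif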
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *         and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                    struct ceph_mds_session *session)
{
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
        struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int i = 0;
        int err = 0;

        dout("fill_trace %p is_dentry %d is_target %d\n", req,
             rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
        /*
         * Debugging hook:
         *
         * If we resend completed ops to a recovering mds, we get no
         * trace.  Since that is very rare, pretend this is the case
         * to ensure the 'no trace' handlers in the callers behave.
         *
         * Fill in inodes unconditionally to avoid breaking cap
         * invariants.
         */
        if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
                pr_info("fill_trace faking empty trace on %lld %s\n",
                        req->r_tid, ceph_mds_op_name(rinfo->head->op));
                if (rinfo->head->is_dentry) {
                        rinfo->head->is_dentry = 0;
                        err = fill_inode(req->r_locked_dir,
                                         &rinfo->diri, rinfo->dirfrag,
                                         session, req->r_request_started, -1);
                }
                if (rinfo->head->is_target) {
                        rinfo->head->is_target = 0;
                        ininfo = rinfo->targeti.in;
                        vino.ino = le64_to_cpu(ininfo->ino);
                        vino.snap = le64_to_cpu(ininfo->snapid);
                        in = ceph_get_inode(sb, vino);
                        err = fill_inode(in, &rinfo->targeti, NULL,
                                         session, req->r_request_started,
                                         req->r_fmode);
                        iput(in);
                }
        }
#endif

        if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
                dout("fill_trace reply is empty!\n");
                if (rinfo->head->result == 0 && req->r_locked_dir)
                        ceph_invalidate_dir_request(req);
                return 0;
        }

        if (rinfo->head->is_dentry) {
                struct inode *dir = req->r_locked_dir;

                err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
                                 session, req->r_request_started, -1,
                                 &req->r_caps_reservation);
                if (err < 0)
                        return err;
        }

        /*
         * ignore null lease/binding on snapdir ENOENT, or else we
         * will have trouble splicing in the virtual snapdir later
         */
        if (rinfo->head->is_dentry && !req->r_aborted &&
            (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
                                               fsc->mount_options->snapdir_name,
                                               req->r_dentry->d_name.len))) {
                /*
                 * lookup link rename   : null -> possibly existing inode
                 * mknod symlink mkdir  : null -> new inode
                 * unlink               : linked -> null
                 */
                struct inode *dir = req->r_locked_dir;
                struct dentry *dn = req->r_dentry;
                bool have_dir_cap, have_lease;

                BUG_ON(!dn);
                BUG_ON(!dir);
                BUG_ON(dn->d_parent->d_inode != dir);
                BUG_ON(ceph_ino(dir) !=
                       le64_to_cpu(rinfo->diri.in->ino));
                BUG_ON(ceph_snap(dir) !=
                       le64_to_cpu(rinfo->diri.in->snapid));

                /* do we have a lease on the whole dir? */
                have_dir_cap =
                        (le32_to_cpu(rinfo->diri.in->cap.caps) &
                         CEPH_CAP_FILE_SHARED);

                /* do we have a dn lease? */
                have_lease = have_dir_cap ||
                        (le16_to_cpu(rinfo->dlease->mask) &
                         CEPH_LOCK_DN);

                if (!have_lease)
                        dout("fill_trace no dentry lease or dir cap\n");

                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);
                        dout("fill_trace doing d_move %p -> %p\n",
                             req->r_old_dentry, dn);

                        /* d_move screws up d_subdirs order */
                        ceph_i_clear(dir, CEPH_I_COMPLETE);

                        d_move(req->r_old_dentry, dn);
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);

                        /* ensure target dentry is invalidated, despite
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);

                        /* take overwritten dentry's readdir offset */
                        dout("dn %p gets %p offset %lld (old offset %lld)\n",
                             req->r_old_dentry, dn, ceph_dentry(dn)->offset,
                             ceph_dentry(req->r_old_dentry)->offset);
                        ceph_dentry(req->r_old_dentry)->offset =
                                ceph_dentry(dn)->offset;

                        dn = req->r_old_dentry;  /* use old_dentry */
                        in = dn->d_inode;
                }

                /* null dentry? */
                if (!rinfo->head->is_target) {
                        dout("fill_trace null dentry\n");
                        if (dn->d_inode) {
                                dout("d_delete %p\n", dn);
                                d_delete(dn);
                        } else {
                                dout("d_instantiate %p NULL\n", dn);
                                d_instantiate(dn, NULL);
                                if (have_lease && d_unhashed(dn))
                                        d_rehash(dn);
                                update_dentry_lease(dn, rinfo->dlease,
                                                    session,
                                                    req->r_request_started);
                        }
                        goto done;
                }

                /* attach proper inode */
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
                if (!dn->d_inode) {
                        in = ceph_get_inode(sb, vino);
                        if (IS_ERR(in)) {
                                pr_err("fill_trace bad get_inode "
                                       "%llx.%llx\n", vino.ino, vino.snap);
                                err = PTR_ERR(in);
                                d_delete(dn);
                                goto done;
                        }
                        dn = splice_dentry(dn, in, &have_lease, true);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
                        }
                        req->r_dentry = dn;  /* may have spliced */
                        igrab(in);
                } else if (ceph_ino(in) == vino.ino &&
                           ceph_snap(in) == vino.snap) {
                        igrab(in);
                } else {
                        dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
                             dn, in, ceph_ino(in), ceph_snap(in),
                             vino.ino, vino.snap);
                        have_lease = false;
                        in = NULL;
                }

                if (have_lease)
                        update_dentry_lease(dn, rinfo->dlease, session,
                                            req->r_request_started);
                dout(" final dn %p\n", dn);
                i++;
        } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                   req->r_op == CEPH_MDS_OP_MKSNAP) {
                struct dentry *dn = req->r_dentry;

                /* fill out a snapdir LOOKUPSNAP dentry */
                BUG_ON(!dn);
                BUG_ON(!req->r_locked_dir);
                BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
                in = ceph_get_inode(sb, vino);
                if (IS_ERR(in)) {
                        pr_err("fill_inode get_inode badness %llx.%llx\n",
                               vino.ino, vino.snap);
                        err = PTR_ERR(in);
                        d_delete(dn);
                        goto done;
                }
                dout(" linking snapped dir %p to dn %p\n", in, dn);
                dn = splice_dentry(dn, in, NULL, true);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
                }
                req->r_dentry = dn;  /* may have spliced */
                igrab(in);
                rinfo->head->is_dentry = 1;  /* fool notrace handlers */
        }
        if (rinfo->head->is_target) {
                vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
                vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

                if (in == NULL || ceph_ino(in) != vino.ino ||
                    ceph_snap(in) != vino.snap) {
                        in = ceph_get_inode(sb, vino);
                        if (IS_ERR(in)) {
                                err = PTR_ERR(in);
                                goto done;
                        }
                }
                req->r_target_inode = in;

                err = fill_inode(in,
                                 &rinfo->targeti, NULL,
                                 session, req->r_request_started,
                                 (le32_to_cpu(rinfo->head->result) == 0) ?
                                 req->r_fmode : -1,
                                 &req->r_caps_reservation);
                if (err < 0) {
                        pr_err("fill_inode badness %p %llx.%llx\n",
                               in, ceph_vinop(in));
                        goto done;
                }
        }

done:
        dout("fill_trace done err=%d\n", err);
        return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                             struct ceph_mds_session *session)
{
        struct dentry *parent = req->r_dentry;
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct qstr dname;
        struct dentry *dn;
        struct inode *in;
        int err = 0, i;
        struct inode *snapdir = NULL;
        struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
        u64 frag = le32_to_cpu(rhead->args.readdir.frag);
        struct ceph_dentry_info *di;

        if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
                snapdir = ceph_get_snapdir(parent->d_inode);
                parent = d_find_alias(snapdir);
                dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
                     rinfo->dir_nr, parent);
        } else {
                dout("readdir_prepopulate %d items under dn %p\n",
                     rinfo->dir_nr, parent);
                if (rinfo->dir_dir)
                        ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
        }

        for (i = 0; i < rinfo->dir_nr; i++) {
                struct ceph_vino vino;

                dname.name = rinfo->dir_dname[i];
                dname.len = rinfo->dir_dname_len[i];
                dname.hash = full_name_hash(dname.name, dname.len);

                vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
                vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
                dn = d_lookup(parent, &dname);
                dout("d_lookup on parent=%p name=%.*s got %p\n",
                     parent, dname.len, dname.name, dn);

                if (!dn) {
                        dn = d_alloc(parent, &dname);
                        dout("d_alloc %p '%.*s' = %p\n", parent,
                             dname.len, dname.name, dn);
                        if (dn == NULL) {
                                dout("d_alloc badness\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        err = ceph_init_dentry(dn);
                        if (err < 0) {
                                dput(dn);
                                goto out;
                        }
                } else if (dn->d_inode &&
                           (ceph_ino(dn->d_inode) != vino.ino ||
                            ceph_snap(dn->d_inode) != vino.snap)) {
                        dout(" dn %p points to wrong inode %p\n",
                             dn, dn->d_inode);
                        d_delete(dn);
                        dput(dn);
                        goto retry_lookup;
                } else {
                        /* reorder parent's d_subdirs */
                        spin_lock(&dcache_lock);
                        spin_lock(&dn->d_lock);
                        list_move(&dn->d_u.d_child, &parent->d_subdirs);
                        spin_unlock(&dn->d_lock);
                        spin_unlock(&dcache_lock);
                }

                di = dn->d_fsdata;
                di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

                /* inode */
                if (dn->d_inode) {
                        in = dn->d_inode;
                } else {
                        in = ceph_get_inode(parent->d_sb, vino);
                        if (IS_ERR(in)) {
                                dout("new_inode badness\n");
                                d_delete(dn);
                                dput(dn);
                                err = PTR_ERR(in);
                                goto out;
                        }
                        dn = splice_dentry(dn, in, NULL, false);
                        if (IS_ERR(dn))
                                dn = NULL;
                }

                if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
                               req->r_request_started, -1,
                               &req->r_caps_reservation) < 0) {
                        pr_err("fill_inode badness on %p\n", in);
                        goto next_item;
                }
                if (dn)
                        update_dentry_lease(dn, rinfo->dir_dlease[i],
                                            req->r_session,
                                            req->r_request_started);
next_item:
                if (dn)
                        dput(dn);
        }
        req->r_did_prepopulate = true;

out:
        if (snapdir) {
                iput(snapdir);
                dput(parent);
        }
        dout("readdir_prepopulate done\n");
        return err;
}
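/*
 * Readdir offsets pack (frag, entry index) into an loff_t, so a
 * directory position survives fragment boundaries.  Minimal sketch,
 * assuming ceph_make_fpos() from super.h (illustration only, not
 * built):
 */
#if 0
static loff_t fpos_example(void)
{
        /* entry 5 of frag 0x01800000 -> (0x01800000ULL << 32) | 5 */
        return ceph_make_fpos(0x01800000, 5);
}
#endif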
int ceph_inode_set_size(struct inode *inode, loff_t size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret = 0;

        spin_lock(&inode->i_lock);
        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
        inode->i_size = size;
        inode->i_blocks = (size + (1 << 9) - 1) >> 9;

        /* tell the MDS if we are approaching max_size */
        if ((size << 1) >= ci->i_max_size &&
            (ci->i_reported_size << 1) < ci->i_max_size)
                ret = 1;

        spin_unlock(&inode->i_lock);
        return ret;
}
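/*
 * Worked example of the "approaching max_size" test above
 * (illustration only, not built): with max_size = 4 MB, a write that
 * pushes the size past 2 MB while the last reported size was still
 * below 2 MB returns 1, so the caller can ask the MDS for more room
 * before the limit is actually hit.
 */
#if 0
static int max_size_check_example(void)
{
        u64 max_size = 4 << 20;
        u64 size = (2 << 20) + 1;       /* just crossed the halfway mark */
        u64 reported = 1 << 20;         /* last size reported to the MDS */

        return (size << 1) >= max_size &&
               (reported << 1) < max_size;     /* -> 1 */
}
#endif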
/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
        if (queue_work(ceph_inode_to_client(inode)->wb_wq,
                       &ceph_inode(inode)->i_wb_work)) {
                dout("ceph_queue_writeback %p\n", inode);
                igrab(inode);
        } else {
                dout("ceph_queue_writeback %p failed\n", inode);
        }
}

static void ceph_writeback_work(struct work_struct *work)
{
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                  i_wb_work);
        struct inode *inode = &ci->vfs_inode;

        dout("writeback %p\n", inode);
        filemap_fdatawrite(&inode->i_data);
        iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
        if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
                       &ceph_inode(inode)->i_pg_inv_work)) {
                dout("ceph_queue_invalidate %p\n", inode);
                igrab(inode);
        } else {
                dout("ceph_queue_invalidate %p failed\n", inode);
        }
}

/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t next = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
                        int skip_page =
                                (PageDirty(page) || PageWriteback(page));

                        if (!skip_page)
                                skip_page = !trylock_page(page);

                        /*
                         * We really shouldn't be looking at the ->index of an
                         * unlocked page.  But we're not allowed to lock these
                         * pages.  So we rely upon nobody altering the ->index
                         * of this (pinned-by-us) page.
                         */
                        index = page->index;
                        if (index > next)
                                next = index;
                        next++;

                        if (skip_page)
                                continue;

                        generic_error_remove_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                  i_pg_inv_work);
        struct inode *inode = &ci->vfs_inode;
        u32 orig_gen;
        int check = 0;

        spin_lock(&inode->i_lock);
        dout("invalidate_pages %p gen %d revoking %d\n", inode,
             ci->i_rdcache_gen, ci->i_rdcache_revoking);
        if (ci->i_rdcache_gen == 0 ||
            ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen);
                /* nevermind! */
                ci->i_rdcache_revoking = 0;
                spin_unlock(&inode->i_lock);
                goto out;
        }
        orig_gen = ci->i_rdcache_gen;
        spin_unlock(&inode->i_lock);

        ceph_invalidate_nondirty_pages(inode->i_mapping);

        spin_lock(&inode->i_lock);
        if (orig_gen == ci->i_rdcache_gen) {
                dout("invalidate_pages %p gen %d successful\n", inode,
                     ci->i_rdcache_gen);
                ci->i_rdcache_gen = 0;
                ci->i_rdcache_revoking = 0;
                check = 1;
        } else {
                dout("invalidate_pages %p gen %d raced, gen now %d\n",
                     inode, orig_gen, ci->i_rdcache_gen);
        }
        spin_unlock(&inode->i_lock);

        if (check)
                ceph_check_caps(ci, 0, NULL);
out:
        iput(inode);
}


/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We also truncate in a separate thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                  i_vmtruncate_work);
        struct inode *inode = &ci->vfs_inode;

        dout("vmtruncate_work %p\n", inode);
        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);
        mutex_unlock(&inode->i_mutex);
        iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
                       &ci->i_vmtruncate_work)) {
                dout("ceph_queue_vmtruncate %p\n", inode);
                igrab(inode);
        } else {
                dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
                     inode, ci->i_truncate_pending);
        }
}
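/*
 * The three queue_*() helpers above share one pattern: take an inode
 * reference only if the work was actually queued, and drop it with
 * iput() in the worker.  Minimal sketch of the pattern (illustration
 * only, not built):
 */
#if 0
static void queue_pattern_example(struct inode *inode,
                                  struct workqueue_struct *wq,
                                  struct work_struct *work)
{
        if (queue_work(wq, work))
                igrab(inode);   /* dropped via iput() in the worker */
        /* else: work is already pending and holds a ref from before */
}
#endif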
/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 to;
        int wrbuffer_refs, wake = 0;

retry:
        spin_lock(&inode->i_lock);
        if (ci->i_truncate_pending == 0) {
                dout("__do_pending_vmtruncate %p none pending\n", inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        /*
         * make sure any dirty snapped pages are flushed before we
         * possibly truncate them.. so write AND block!
         */
        if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
                dout("__do_pending_vmtruncate %p flushing snaps first\n",
                     inode);
                spin_unlock(&inode->i_lock);
                filemap_write_and_wait_range(&inode->i_data, 0,
                                             inode->i_sb->s_maxbytes);
                goto retry;
        }

        to = ci->i_truncate_size;
        wrbuffer_refs = ci->i_wrbuffer_ref;
        dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
             ci->i_truncate_pending, to);
        spin_unlock(&inode->i_lock);

        truncate_inode_pages(inode->i_mapping, to);

        spin_lock(&inode->i_lock);
        ci->i_truncate_pending--;
        if (ci->i_truncate_pending == 0)
                wake = 1;
        spin_unlock(&inode->i_lock);

        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
        if (wake)
                wake_up_all(&ci->i_cap_wq);
}


/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
        nd_set_link(nd, ci->i_symlink);
        return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
        .readlink = generic_readlink,
        .follow_link = ceph_sym_follow_link,
};
/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct inode *parent_inode = dentry->d_parent->d_inode;
        const unsigned int ia_valid = attr->ia_valid;
        struct ceph_mds_request *req;
        struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
        int issued;
        int release = 0, dirtied = 0;
        int mask = 0;
        int err = 0;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        __ceph_do_pending_vmtruncate(inode);

        err = inode_change_ok(inode, attr);
        if (err != 0)
                return err;

        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
                                       USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);

        spin_lock(&inode->i_lock);
        issued = __ceph_caps_issued(ci, NULL);
        dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

        if (ia_valid & ATTR_UID) {
                dout("setattr %p uid %d -> %d\n", inode,
                     inode->i_uid, attr->ia_uid);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_uid = attr->ia_uid;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
                           attr->ia_uid != inode->i_uid) {
                        req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
                        mask |= CEPH_SETATTR_UID;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }
        if (ia_valid & ATTR_GID) {
                dout("setattr %p gid %d -> %d\n", inode,
                     inode->i_gid, attr->ia_gid);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_gid = attr->ia_gid;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
                           attr->ia_gid != inode->i_gid) {
                        req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
                        mask |= CEPH_SETATTR_GID;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }
        if (ia_valid & ATTR_MODE) {
                dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
                     attr->ia_mode);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_mode = attr->ia_mode;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
                           attr->ia_mode != inode->i_mode) {
                        req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
                        mask |= CEPH_SETATTR_MODE;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }

        if (ia_valid & ATTR_ATIME) {
                dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
                     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
                     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
                if (issued & CEPH_CAP_FILE_EXCL) {
                        ci->i_time_warp_seq++;
                        inode->i_atime = attr->ia_atime;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_WR) &&
                           timespec_compare(&inode->i_atime,
                                            &attr->ia_atime) < 0) {
                        inode->i_atime = attr->ia_atime;
                        dirtied |= CEPH_CAP_FILE_WR;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
                           !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
                        ceph_encode_timespec(&req->r_args.setattr.atime,
                                             &attr->ia_atime);
                        mask |= CEPH_SETATTR_ATIME;
                        release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
                                CEPH_CAP_FILE_WR;
                }
        }
        if (ia_valid & ATTR_MTIME) {
                dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
                     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
                     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
                if (issued & CEPH_CAP_FILE_EXCL) {
                        ci->i_time_warp_seq++;
                        inode->i_mtime = attr->ia_mtime;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_WR) &&
                           timespec_compare(&inode->i_mtime,
                                            &attr->ia_mtime) < 0) {
                        inode->i_mtime = attr->ia_mtime;
                        dirtied |= CEPH_CAP_FILE_WR;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
                           !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
                        ceph_encode_timespec(&req->r_args.setattr.mtime,
                                             &attr->ia_mtime);
                        mask |= CEPH_SETATTR_MTIME;
                        release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
                                CEPH_CAP_FILE_WR;
                }
        }
        if (ia_valid & ATTR_SIZE) {
                dout("setattr %p size %lld -> %lld\n", inode,
                     inode->i_size, attr->ia_size);
                if (attr->ia_size > inode->i_sb->s_maxbytes) {
                        err = -EINVAL;
                        goto out;
                }
                if ((issued & CEPH_CAP_FILE_EXCL) &&
                    attr->ia_size > inode->i_size) {
                        inode->i_size = attr->ia_size;
                        inode->i_blocks =
                                (attr->ia_size + (1 << 9) - 1) >> 9;
                        inode->i_ctime = attr->ia_ctime;
                        ci->i_reported_size = attr->ia_size;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
                           attr->ia_size != inode->i_size) {
                        req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
                        req->r_args.setattr.old_size =
                                cpu_to_le64(inode->i_size);
                        mask |= CEPH_SETATTR_SIZE;
                        release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
                                CEPH_CAP_FILE_WR;
                }
        }

        /* these do nothing */
        if (ia_valid & ATTR_CTIME) {
                bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
                                         ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
                dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
                     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
                     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
                     only ? "ctime only" : "ignored");
                inode->i_ctime = attr->ia_ctime;
                if (only) {
                        /*
                         * if kernel wants to dirty ctime but nothing else,
                         * we need to choose a cap to dirty under, or do
                         * an almost-no-op setattr
                         */
                        if (issued & CEPH_CAP_AUTH_EXCL)
                                dirtied |= CEPH_CAP_AUTH_EXCL;
                        else if (issued & CEPH_CAP_FILE_EXCL)
                                dirtied |= CEPH_CAP_FILE_EXCL;
                        else if (issued & CEPH_CAP_XATTR_EXCL)
                                dirtied |= CEPH_CAP_XATTR_EXCL;
                        else
                                mask |= CEPH_SETATTR_CTIME;
                }
        }
        if (ia_valid & ATTR_FILE)
                dout("setattr %p ATTR_FILE ... hrm!\n", inode);

        if (dirtied) {
                __ceph_mark_dirty_caps(ci, dirtied);
                inode->i_ctime = CURRENT_TIME;
        }

        release &= issued;
        spin_unlock(&inode->i_lock);

        if (mask) {
                req->r_inode = igrab(inode);
                req->r_inode_drop = release;
                req->r_args.setattr.mask = cpu_to_le32(mask);
                req->r_num_caps = 1;
                err = ceph_mdsc_do_request(mdsc, parent_inode, req);
        }
        dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
             ceph_cap_string(dirtied), mask);

        ceph_mdsc_put_request(req);
        __ceph_do_pending_vmtruncate(inode);
        return err;
out:
        spin_unlock(&inode->i_lock);
        ceph_mdsc_put_request(req);
        return err;
}
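/*
 * Each attribute above follows one decision pattern: with the right
 * EXCL cap the change is applied locally and the cap marked dirty;
 * otherwise it is packed into the MDS setattr request.  Minimal
 * sketch of the pattern, using the uid case (illustration only, not
 * built):
 */
#if 0
static void setattr_pattern_example(struct inode *inode, int issued,
                                    uid_t new_uid, int *dirtied, int *mask)
{
        if (issued & CEPH_CAP_AUTH_EXCL) {
                inode->i_uid = new_uid;         /* local; flushed via caps */
                *dirtied |= CEPH_CAP_AUTH_EXCL;
        } else {
                *mask |= CEPH_SETATTR_UID;      /* let the MDS apply it */
        }
}
#endif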
"ctime only" : "ignored"); 1679 inode->i_ctime = attr->ia_ctime; 1680 if (only) { 1681 /* 1682 * if kernel wants to dirty ctime but nothing else, 1683 * we need to choose a cap to dirty under, or do 1684 * a almost-no-op setattr 1685 */ 1686 if (issued & CEPH_CAP_AUTH_EXCL) 1687 dirtied |= CEPH_CAP_AUTH_EXCL; 1688 else if (issued & CEPH_CAP_FILE_EXCL) 1689 dirtied |= CEPH_CAP_FILE_EXCL; 1690 else if (issued & CEPH_CAP_XATTR_EXCL) 1691 dirtied |= CEPH_CAP_XATTR_EXCL; 1692 else 1693 mask |= CEPH_SETATTR_CTIME; 1694 } 1695 } 1696 if (ia_valid & ATTR_FILE) 1697 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 1698 1699 if (dirtied) { 1700 __ceph_mark_dirty_caps(ci, dirtied); 1701 inode->i_ctime = CURRENT_TIME; 1702 } 1703 1704 release &= issued; 1705 spin_unlock(&inode->i_lock); 1706 1707 if (mask) { 1708 req->r_inode = igrab(inode); 1709 req->r_inode_drop = release; 1710 req->r_args.setattr.mask = cpu_to_le32(mask); 1711 req->r_num_caps = 1; 1712 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 1713 } 1714 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 1715 ceph_cap_string(dirtied), mask); 1716 1717 ceph_mdsc_put_request(req); 1718 __ceph_do_pending_vmtruncate(inode); 1719 return err; 1720 out: 1721 spin_unlock(&inode->i_lock); 1722 ceph_mdsc_put_request(req); 1723 return err; 1724 } 1725 1726 /* 1727 * Verify that we have a lease on the given mask. If not, 1728 * do a getattr against an mds. 1729 */ 1730 int ceph_do_getattr(struct inode *inode, int mask) 1731 { 1732 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 1733 struct ceph_mds_client *mdsc = fsc->mdsc; 1734 struct ceph_mds_request *req; 1735 int err; 1736 1737 if (ceph_snap(inode) == CEPH_SNAPDIR) { 1738 dout("do_getattr inode %p SNAPDIR\n", inode); 1739 return 0; 1740 } 1741 1742 dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask)); 1743 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 1744 return 0; 1745 1746 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1747 if (IS_ERR(req)) 1748 return PTR_ERR(req); 1749 req->r_inode = igrab(inode); 1750 req->r_num_caps = 1; 1751 req->r_args.getattr.mask = cpu_to_le32(mask); 1752 err = ceph_mdsc_do_request(mdsc, NULL, req); 1753 ceph_mdsc_put_request(req); 1754 dout("do_getattr result=%d\n", err); 1755 return err; 1756 } 1757 1758 1759 /* 1760 * Check inode permissions. We verify we have a valid value for 1761 * the AUTH cap, then call the generic handler. 1762 */ 1763 int ceph_permission(struct inode *inode, int mask) 1764 { 1765 int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1766 1767 if (!err) 1768 err = generic_permission(inode, mask, NULL); 1769 return err; 1770 } 1771 1772 /* 1773 * Get all attributes. Hopefully somedata we'll have a statlite() 1774 * and can limit the fields we require to be accurate. 1775 */ 1776 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 1777 struct kstat *stat) 1778 { 1779 struct inode *inode = dentry->d_inode; 1780 struct ceph_inode_info *ci = ceph_inode(inode); 1781 int err; 1782 1783 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL); 1784 if (!err) { 1785 generic_fillattr(inode, stat); 1786 stat->ino = inode->i_ino; 1787 if (ceph_snap(inode) != CEPH_NOSNAP) 1788 stat->dev = ceph_snap(inode); 1789 else 1790 stat->dev = 0; 1791 if (S_ISDIR(inode->i_mode)) { 1792 stat->size = ci->i_rbytes; 1793 stat->blocks = 0; 1794 stat->blksize = 65536; 1795 } 1796 } 1797 return err; 1798 } 1799