#include "ceph_debug.h"

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "decode.h"

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
        struct inode *inode;
        ino_t t = ceph_vino_to_ino(vino);

        inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
        if (inode == NULL)
                return ERR_PTR(-ENOMEM);
        if (inode->i_state & I_NEW) {
                dout("get_inode created new inode %p %llx.%llx ino %llx\n",
                     inode, ceph_vinop(inode), (u64)inode->i_ino);
                unlock_new_inode(inode);
        }

        dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
             vino.snap, inode);
        return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
        struct ceph_vino vino = {
                .ino = ceph_ino(parent),
                .snap = CEPH_SNAPDIR,
        };
        struct inode *inode = ceph_get_inode(parent->i_sb, vino);
        struct ceph_inode_info *ci = ceph_inode(inode);

        BUG_ON(!S_ISDIR(parent->i_mode));
        if (IS_ERR(inode))
                return ERR_PTR(PTR_ERR(inode));
        inode->i_mode = parent->i_mode;
        inode->i_uid = parent->i_uid;
        inode->i_gid = parent->i_gid;
        inode->i_op = &ceph_dir_iops;
        inode->i_fop = &ceph_dir_fops;
        ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
        ci->i_rbytes = 0;
        return inode;
}

const struct inode_operations ceph_file_iops = {
        .permission = ceph_permission,
        .setattr = ceph_setattr,
        .getattr = ceph_getattr,
        .setxattr = ceph_setxattr,
        .getxattr = ceph_getxattr,
        .listxattr = ceph_listxattr,
        .removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
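
/*
 * For reference: a frag is a 32-bit (bits, value) pair (see
 * ceph_frag.h).  Frag 0/0 covers the entire hash space, and splitting
 * it by n produces 2^n children, each covering an equal slice of the
 * parent's range.  ceph_frag_make_child() and
 * ceph_frag_contains_value() encapsulate that arithmetic for the
 * helpers below.
 */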

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
                                                    u32 f)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_frag *frag;
        int c;

        p = &ci->i_fragtree.rb_node;
        while (*p) {
                parent = *p;
                frag = rb_entry(parent, struct ceph_inode_frag, node);
                c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        p = &(*p)->rb_left;
                else if (c > 0)
                        p = &(*p)->rb_right;
                else
                        return frag;
        }

        frag = kmalloc(sizeof(*frag), GFP_NOFS);
        if (!frag) {
                pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
                       "frag %x\n", &ci->vfs_inode,
                       ceph_vinop(&ci->vfs_inode), f);
                return ERR_PTR(-ENOMEM);
        }
        frag->frag = f;
        frag->split_by = 0;
        frag->mds = -1;
        frag->ndist = 0;

        rb_link_node(&frag->node, parent, p);
        rb_insert_color(&frag->node, &ci->i_fragtree);

        dout("get_or_create_frag added %llx.%llx frag %x\n",
             ceph_vinop(&ci->vfs_inode), f);
        return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
        struct rb_node *n = ci->i_fragtree.rb_node;

        while (n) {
                struct ceph_inode_frag *frag =
                        rb_entry(n, struct ceph_inode_frag, node);
                int c = ceph_frag_compare(f, frag->frag);
                if (c < 0)
                        n = n->rb_left;
                else if (c > 0)
                        n = n->rb_right;
                else
                        return frag;
        }
        return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                     struct ceph_inode_frag *pfrag,
                     int *found)
{
        u32 t = ceph_frag_make(0, 0);
        struct ceph_inode_frag *frag;
        unsigned nway, i;
        u32 n;

        if (found)
                *found = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        while (1) {
                WARN_ON(!ceph_frag_contains_value(t, v));
                frag = __ceph_find_frag(ci, t);
                if (!frag)
                        break; /* t is a leaf */
                if (frag->split_by == 0) {
                        if (pfrag)
                                memcpy(pfrag, frag, sizeof(*pfrag));
                        if (found)
                                *found = 1;
                        break;
                }

                /* choose child */
                nway = 1 << frag->split_by;
                dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
                     frag->split_by, nway);
                for (i = 0; i < nway; i++) {
                        n = ceph_frag_make_child(t, frag->split_by, i);
                        if (ceph_frag_contains_value(n, v)) {
                                t = n;
                                break;
                        }
                }
                BUG_ON(i == nway);
        }
        dout("choose_frag(%x) = %x\n", v, t);

        mutex_unlock(&ci->i_fragtree_mutex);
        return t;
}
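
/*
 * For example, if the tree records a single 2-way split of the root
 * frag, ceph_choose_frag(ci, v, ...) starts at 0/0, sees split_by=1,
 * and descends into whichever of the two children contains v; if that
 * child has no entry of its own, it is a leaf and its frag value is
 * returned.
 */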

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  (Otherwise, only
 * branches/splits are included in i_fragtree.)
 */
static int ceph_fill_dirfrag(struct inode *inode,
                             struct ceph_mds_reply_dirfrag *dirinfo)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        u32 id = le32_to_cpu(dirinfo->frag);
        int mds = le32_to_cpu(dirinfo->auth);
        int ndist = le32_to_cpu(dirinfo->ndist);
        int i;
        int err = 0;

        mutex_lock(&ci->i_fragtree_mutex);
        if (ndist == 0) {
                /* no delegation info needed. */
                frag = __ceph_find_frag(ci, id);
                if (!frag)
                        goto out;
                if (frag->split_by == 0) {
                        /* tree leaf, remove */
                        dout("fill_dirfrag removed %llx.%llx frag %x"
                             " (no ref)\n", ceph_vinop(inode), id);
                        rb_erase(&frag->node, &ci->i_fragtree);
                        kfree(frag);
                } else {
                        /* tree branch, keep and clear */
                        dout("fill_dirfrag cleared %llx.%llx frag %x"
                             " referral\n", ceph_vinop(inode), id);
                        frag->mds = -1;
                        frag->ndist = 0;
                }
                goto out;
        }


        /* find/add this frag to store mds delegation info */
        frag = __get_or_create_frag(ci, id);
        if (IS_ERR(frag)) {
                /* this is not the end of the world; we can continue
                   with bad/inaccurate delegation info */
                pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
                       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
                err = -ENOMEM;
                goto out;
        }

        frag->mds = mds;
        frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
        for (i = 0; i < frag->ndist; i++)
                frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
        dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
             ceph_vinop(inode), frag->frag, frag->ndist);

out:
        mutex_unlock(&ci->i_fragtree_mutex);
        return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
        struct ceph_inode_info *ci;
        int i;

        ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
        if (!ci)
                return NULL;

        dout("alloc_inode %p\n", &ci->vfs_inode);

        ci->i_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
        ci->i_release_count = 0;
        ci->i_symlink = NULL;

        ci->i_fragtree = RB_ROOT;
        mutex_init(&ci->i_fragtree_mutex);

        ci->i_xattrs.blob = NULL;
        ci->i_xattrs.prealloc_blob = NULL;
        ci->i_xattrs.dirty = false;
        ci->i_xattrs.index = RB_ROOT;
        ci->i_xattrs.count = 0;
        ci->i_xattrs.names_size = 0;
        ci->i_xattrs.vals_size = 0;
        ci->i_xattrs.version = 0;
        ci->i_xattrs.index_version = 0;

        ci->i_caps = RB_ROOT;
        ci->i_auth_cap = NULL;
        ci->i_dirty_caps = 0;
        ci->i_flushing_caps = 0;
        INIT_LIST_HEAD(&ci->i_dirty_item);
        INIT_LIST_HEAD(&ci->i_flushing_item);
        ci->i_cap_flush_seq = 0;
        ci->i_cap_flush_last_tid = 0;
        memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
        init_waitqueue_head(&ci->i_cap_wq);
        ci->i_hold_caps_min = 0;
        ci->i_hold_caps_max = 0;
        INIT_LIST_HEAD(&ci->i_cap_delay_list);
        ci->i_cap_exporting_mds = 0;
        ci->i_cap_exporting_mseq = 0;
        ci->i_cap_exporting_issued = 0;
        INIT_LIST_HEAD(&ci->i_cap_snaps);
        ci->i_head_snapc = NULL;
        ci->i_snap_caps = 0;

        for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
                ci->i_nr_by_mode[i] = 0;

        ci->i_truncate_seq = 0;
        ci->i_truncate_size = 0;
        ci->i_truncate_pending = 0;

        ci->i_max_size = 0;
        ci->i_reported_size = 0;
        ci->i_wanted_max_size = 0;
        ci->i_requested_max_size = 0;

        ci->i_pin_ref = 0;
        ci->i_rd_ref = 0;
        ci->i_rdcache_ref = 0;
        ci->i_wr_ref = 0;
        ci->i_wrbuffer_ref = 0;
        ci->i_wrbuffer_ref_head = 0;
        ci->i_shared_gen = 0;
        ci->i_rdcache_gen = 0;
        ci->i_rdcache_revoking = 0;

        INIT_LIST_HEAD(&ci->i_unsafe_writes);
        INIT_LIST_HEAD(&ci->i_unsafe_dirops);
        spin_lock_init(&ci->i_unsafe_lock);

        ci->i_snap_realm = NULL;
        INIT_LIST_HEAD(&ci->i_snap_realm_item);
        INIT_LIST_HEAD(&ci->i_snap_flush_item);

        INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
        INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);
        INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

        return &ci->vfs_inode;
}
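
/*
 * Everything hung off the inode above and by fill_inode() below (the
 * symlink target, frag tree nodes, and xattr blobs) is freed again in
 * ceph_destroy_inode().
 */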

void ceph_destroy_inode(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag;
        struct rb_node *n;

        dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

        ceph_queue_caps_release(inode);

        /*
         * we may still have a snap_realm reference if there are stray
         * caps in i_cap_exporting_issued or i_snap_caps.
         */
        if (ci->i_snap_realm) {
                struct ceph_mds_client *mdsc =
                        &ceph_client(ci->vfs_inode.i_sb)->mdsc;
                struct ceph_snap_realm *realm = ci->i_snap_realm;

                dout(" dropping residual ref to snap realm %p\n", realm);
                spin_lock(&realm->inodes_with_caps_lock);
                list_del_init(&ci->i_snap_realm_item);
                spin_unlock(&realm->inodes_with_caps_lock);
                ceph_put_snap_realm(mdsc, realm);
        }

        kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
                kfree(frag);
        }

        __ceph_destroy_xattrs(ci);
        if (ci->i_xattrs.blob)
                ceph_buffer_put(ci->i_xattrs.blob);
        if (ci->i_xattrs.prealloc_blob)
                ceph_buffer_put(ci->i_xattrs.prealloc_blob);

        kmem_cache_free(ceph_inode_cachep, ci);
}


/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
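
/*
 * Note that the _seq comparisons use ceph_seq_cmp(), which is
 * wraparound-safe: the difference is evaluated as a signed 32-bit
 * value, so e.g. a truncate_seq of 1 still compares as newer than a
 * truncate_seq of 0xffffffff.
 */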

int ceph_fill_file_size(struct inode *inode, int issued,
                        u32 truncate_seq, u64 truncate_size, u64 size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int queue_trunc = 0;

        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
            (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
                dout("size %lld -> %llu\n", inode->i_size, size);
                inode->i_size = size;
                inode->i_blocks = (size + (1 << 9) - 1) >> 9;
                ci->i_reported_size = size;
                if (truncate_seq != ci->i_truncate_seq) {
                        dout("truncate_seq %u -> %u\n",
                             ci->i_truncate_seq, truncate_seq);
                        ci->i_truncate_seq = truncate_seq;
                        /*
                         * If we hold relevant caps, or if the file is
                         * opened or mmapped (and so may have cached
                         * pages that need truncating), queue an async
                         * truncate.
                         */
                        if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
                                       CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
                                       CEPH_CAP_FILE_EXCL)) ||
                            mapping_mapped(inode->i_mapping) ||
                            __ceph_caps_file_wanted(ci)) {
                                ci->i_truncate_pending++;
                                queue_trunc = 1;
                        }
                }
        }
        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
            ci->i_truncate_size != truncate_size) {
                dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
                     truncate_size);
                ci->i_truncate_size = truncate_size;
        }
        return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
                         u64 time_warp_seq, struct timespec *ctime,
                         struct timespec *mtime, struct timespec *atime)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int warn = 0;

        if (issued & (CEPH_CAP_FILE_EXCL|
                      CEPH_CAP_FILE_WR|
                      CEPH_CAP_FILE_BUFFER)) {
                if (timespec_compare(ctime, &inode->i_ctime) > 0) {
                        dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
                             inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
                             ctime->tv_sec, ctime->tv_nsec);
                        inode->i_ctime = *ctime;
                }
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
                        /* the MDS did a utimes() */
                        dout("mtime %ld.%09ld -> %ld.%09ld "
                             "tw %d -> %d\n",
                             inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
                             mtime->tv_sec, mtime->tv_nsec,
                             ci->i_time_warp_seq, (int)time_warp_seq);

                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else if (time_warp_seq == ci->i_time_warp_seq) {
                        /* nobody did utimes(); take the max */
                        if (timespec_compare(mtime, &inode->i_mtime) > 0) {
                                dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_mtime.tv_sec,
                                     inode->i_mtime.tv_nsec,
                                     mtime->tv_sec, mtime->tv_nsec);
                                inode->i_mtime = *mtime;
                        }
                        if (timespec_compare(atime, &inode->i_atime) > 0) {
                                dout("atime %ld.%09ld -> %ld.%09ld inc\n",
                                     inode->i_atime.tv_sec,
                                     inode->i_atime.tv_nsec,
                                     atime->tv_sec, atime->tv_nsec);
                                inode->i_atime = *atime;
                        }
                } else if (issued & CEPH_CAP_FILE_EXCL) {
                        /* we did a utimes(); ignore mds values */
                } else {
                        warn = 1;
                }
        } else {
                /* we have no write caps; whatever the MDS says is true */
                if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
                        inode->i_ctime = *ctime;
                        inode->i_mtime = *mtime;
                        inode->i_atime = *atime;
                        ci->i_time_warp_seq = time_warp_seq;
                } else {
                        warn = 1;
                }
        }
        if (warn) /* time_warp_seq shouldn't go backwards */
                dout("%p mds time_warp_seq %llu < %u\n",
                     inode, time_warp_seq, ci->i_time_warp_seq);
}
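
/*
 * To summarize ceph_fill_file_time() above: with no write caps we
 * take the MDS times verbatim (unless time_warp_seq went backwards);
 * with write/buffer caps we only let mtime/atime move forward, unless
 * a newer time_warp_seq tells us a utimes() happened elsewhere.
 */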

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
                      struct ceph_mds_reply_info_in *iinfo,
                      struct ceph_mds_reply_dirfrag *dirinfo,
                      struct ceph_mds_session *session,
                      unsigned long ttl_from, int cap_fmode,
                      struct ceph_cap_reservation *caps_reservation)
{
        struct ceph_mds_reply_inode *info = iinfo->in;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int i;
        int issued, implemented;
        struct timespec mtime, atime, ctime;
        u32 nsplits;
        struct ceph_buffer *xattr_blob = NULL;
        int err = 0;
        int queue_trunc = 0;

        dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
             inode, ceph_vinop(inode), le64_to_cpu(info->version),
             ci->i_version);

        /*
         * prealloc xattr data, if it looks like we'll need it.  only
         * if len > 4 (meaning there are actually xattrs; the first 4
         * bytes are the xattr count).
         */
        if (iinfo->xattr_len > 4) {
                xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
                if (!xattr_blob)
                        pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
                               iinfo->xattr_len);
        }

        spin_lock(&inode->i_lock);

        /*
         * provided version will be odd if inode value is projected,
         * even if stable.  skip the update if we already have newer
         * info (e.g., due to inode info racing from multiple MDSs),
         * or if we are getting projected (unstable) inode info.
         */
        if (le64_to_cpu(info->version) > 0 &&
            (ci->i_version & ~1) > le64_to_cpu(info->version))
                goto no_change;

        issued = __ceph_caps_issued(ci, &implemented);
        issued |= implemented | __ceph_caps_dirty(ci);

        /* update inode */
        ci->i_version = le64_to_cpu(info->version);
        inode->i_version++;
        inode->i_rdev = le32_to_cpu(info->rdev);

        if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
                inode->i_mode = le32_to_cpu(info->mode);
                inode->i_uid = le32_to_cpu(info->uid);
                inode->i_gid = le32_to_cpu(info->gid);
                dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
                     inode->i_uid, inode->i_gid);
        }

        if ((issued & CEPH_CAP_LINK_EXCL) == 0)
                inode->i_nlink = le32_to_cpu(info->nlink);

        /* be careful with mtime, atime, size */
        ceph_decode_timespec(&atime, &info->atime);
        ceph_decode_timespec(&mtime, &info->mtime);
        ceph_decode_timespec(&ctime, &info->ctime);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          le32_to_cpu(info->truncate_seq),
                                          le64_to_cpu(info->truncate_size),
                                          le64_to_cpu(info->size));
        ceph_fill_file_time(inode, issued,
                            le32_to_cpu(info->time_warp_seq),
                            &ctime, &mtime, &atime);

        ci->i_max_size = le64_to_cpu(info->max_size);
        ci->i_layout = info->layout;
        inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

        /* xattrs */
        /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
        if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
            le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
                if (ci->i_xattrs.blob)
                        ceph_buffer_put(ci->i_xattrs.blob);
                ci->i_xattrs.blob = xattr_blob;
                if (xattr_blob)
                        memcpy(ci->i_xattrs.blob->vec.iov_base,
                               iinfo->xattr_data, iinfo->xattr_len);
                ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
                xattr_blob = NULL;  /* ownership passed to the inode;
                                       don't drop it again at out: */
        }

        inode->i_mapping->a_ops = &ceph_aops;
        inode->i_mapping->backing_dev_info =
                &ceph_client(inode->i_sb)->backing_dev_info;

        switch (inode->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFBLK:
        case S_IFCHR:
        case S_IFSOCK:
                init_special_inode(inode, inode->i_mode, inode->i_rdev);
                inode->i_op = &ceph_file_iops;
                break;
        case S_IFREG:
                inode->i_op = &ceph_file_iops;
                inode->i_fop = &ceph_file_fops;
                break;
        case S_IFLNK:
                inode->i_op = &ceph_symlink_iops;
                if (!ci->i_symlink) {
                        int symlen = iinfo->symlink_len;
                        char *sym;

                        BUG_ON(symlen != inode->i_size);
                        spin_unlock(&inode->i_lock);

                        err = -ENOMEM;
                        sym = kmalloc(symlen+1, GFP_NOFS);
                        if (!sym)
                                goto out;
                        memcpy(sym, iinfo->symlink, symlen);
                        sym[symlen] = 0;

                        spin_lock(&inode->i_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
                                kfree(sym); /* lost a race */
                }
                break;
        case S_IFDIR:
                inode->i_op = &ceph_dir_iops;
                inode->i_fop = &ceph_dir_fops;

                ci->i_files = le64_to_cpu(info->files);
                ci->i_subdirs = le64_to_cpu(info->subdirs);
                ci->i_rbytes = le64_to_cpu(info->rbytes);
                ci->i_rfiles = le64_to_cpu(info->rfiles);
                ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
                ceph_decode_timespec(&ci->i_rctime, &info->rctime);

                /* set dir completion flag? */
                if (ci->i_files == 0 && ci->i_subdirs == 0 &&
                    ceph_snap(inode) == CEPH_NOSNAP &&
                    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED)) {
                        dout(" marking %p complete (empty)\n", inode);
                        ci->i_ceph_flags |= CEPH_I_COMPLETE;
                        ci->i_max_offset = 2;
                }

                /* it may be better to set st_size in getattr instead? */
                if (ceph_test_opt(ceph_client(inode->i_sb), RBYTES))
                        inode->i_size = ci->i_rbytes;
                break;
        default:
                pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
                       ceph_vinop(inode), inode->i_mode);
        }

no_change:
        spin_unlock(&inode->i_lock);

        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);

        /* populate frag tree */
        /* FIXME: move me up, if/when version reflects fragtree changes */
        nsplits = le32_to_cpu(info->fragtree.nsplits);
        mutex_lock(&ci->i_fragtree_mutex);
        for (i = 0; i < nsplits; i++) {
                u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
                struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

                if (IS_ERR(frag))
                        continue;
                frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
                dout(" frag %x split by %d\n", frag->frag, frag->split_by);
        }
        mutex_unlock(&ci->i_fragtree_mutex);

        /* were we issued a capability? */
        if (info->cap.caps) {
                if (ceph_snap(inode) == CEPH_NOSNAP) {
                        ceph_add_cap(inode, session,
                                     le64_to_cpu(info->cap.cap_id),
                                     cap_fmode,
                                     le32_to_cpu(info->cap.caps),
                                     le32_to_cpu(info->cap.wanted),
                                     le32_to_cpu(info->cap.seq),
                                     le32_to_cpu(info->cap.mseq),
                                     le64_to_cpu(info->cap.realm),
                                     info->cap.flags,
                                     caps_reservation);
                } else {
                        spin_lock(&inode->i_lock);
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
                        spin_unlock(&inode->i_lock);
                }
        }

        /* update delegation info? */
        if (dirinfo)
                ceph_fill_dirfrag(inode, dirinfo);

        err = 0;

out:
        if (xattr_blob)
                ceph_buffer_put(xattr_blob);
        return err;
}
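
/*
 * Dentry lease durations arrive from the MDS in milliseconds and are
 * turned into jiffies-based expiry times in update_dentry_lease()
 * below.  For example, a 30000 ms lease with HZ=250 yields
 * ttl = from_time + 7500 jiffies, with a renew point halfway through
 * at from_time + 3750 jiffies.
 */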

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
                                struct ceph_mds_reply_lease *lease,
                                struct ceph_mds_session *session,
                                unsigned long from_time)
{
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        unsigned long duration = le32_to_cpu(lease->duration_ms);
        unsigned long ttl = from_time + (duration * HZ) / 1000;
        unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
        struct inode *dir;

        /* only track leases on regular dentries */
        if (dentry->d_op != &ceph_dentry_ops)
                return;

        spin_lock(&dentry->d_lock);
        dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
             dentry, le16_to_cpu(lease->mask), duration, ttl);

        /* make lease_rdcache_gen match directory */
        dir = dentry->d_parent->d_inode;
        di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

        if (lease->mask == 0)
                goto out_unlock;

        if (di->lease_gen == session->s_cap_gen &&
            time_before(ttl, dentry->d_time))
                goto out_unlock;  /* we already have a newer lease. */

        if (di->lease_session && di->lease_session != session)
                goto out_unlock;

        ceph_dentry_lru_touch(dentry);

        if (!di->lease_session)
                di->lease_session = ceph_get_mds_session(session);
        di->lease_gen = session->s_cap_gen;
        di->lease_seq = le32_to_cpu(lease->seq);
        di->lease_renew_after = half_ttl;
        di->lease_renew_from = 0;
        dentry->d_time = ttl;
out_unlock:
        spin_unlock(&dentry->d_lock);
        return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
                                    bool *prehash)
{
        struct dentry *realdn;

        /* dn must be unhashed */
        if (!d_unhashed(dn))
                d_drop(dn);
        realdn = d_materialise_unique(dn, in);
        if (IS_ERR(realdn)) {
                pr_err("splice_dentry error %p inode %p ino %llx.%llx\n",
                       dn, in, ceph_vinop(in));
                if (prehash)
                        *prehash = false; /* don't rehash on error */
                dn = realdn; /* note realdn contains the error */
                goto out;
        } else if (realdn) {
                dout("dn %p (%d) spliced with %p (%d) "
                     "inode %p ino %llx.%llx\n",
                     dn, atomic_read(&dn->d_count),
                     realdn, atomic_read(&realdn->d_count),
                     realdn->d_inode, ceph_vinop(realdn->d_inode));
                dput(dn);
                dn = realdn;
        } else {
                BUG_ON(!ceph_dentry(dn));

                dout("dn %p attached to %p ino %llx.%llx\n",
                     dn, dn->d_inode, ceph_vinop(dn->d_inode));
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
out:
        return dn;
}
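
/*
 * A typical caller (see ceph_fill_trace() below) consumes the result
 * like so:
 *
 *      dn = splice_dentry(dn, in, &have_lease);
 *      if (IS_ERR(dn))
 *              return PTR_ERR(dn);     (dn now holds the error)
 *
 * i.e. the passed-in dentry must not be touched afterwards; only the
 * returned dentry is valid.
 */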

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
        struct dentry *dir = dn->d_parent;
        struct inode *inode = dn->d_parent->d_inode;
        struct ceph_dentry_info *di;

        BUG_ON(!inode);

        di = ceph_dentry(dn);

        spin_lock(&inode->i_lock);
        di->offset = ceph_inode(inode)->i_max_offset++;
        spin_unlock(&inode->i_lock);

        spin_lock(&dcache_lock);
        spin_lock(&dn->d_lock);
        /* move the dentry (not the list head) to the front of
           d_subdirs, matching what ceph_readdir_prepopulate does */
        list_move(&dn->d_u.d_child, &dir->d_subdirs);
        dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
             dn->d_u.d_child.prev, dn->d_u.d_child.next);
        spin_unlock(&dn->d_lock);
        spin_unlock(&dcache_lock);
}
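
/*
 * Readdir offsets 0 and 1 are reserved for "." and "..", which is why
 * i_max_offset starts at 2 when a directory is marked complete in
 * fill_inode() above; each freshly linked dentry then claims the next
 * offset, so a readdir served from the dcache sees entries in
 * insertion order.
 */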

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *         and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
                    struct ceph_mds_session *session)
{
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
        struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
        struct ceph_client *client = ceph_sb_to_client(sb);
        int i = 0;
        int err = 0;

        dout("fill_trace %p is_dentry %d is_target %d\n", req,
             rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
        /*
         * Debugging hook:
         *
         * If we resend completed ops to a recovering mds, we get no
         * trace.  Since that is very rare, pretend this is the case
         * to ensure the 'no trace' handlers in the callers behave.
         *
         * Fill in inodes unconditionally to avoid breaking cap
         * invariants.
         */
        if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
                pr_info("fill_trace faking empty trace on %lld %s\n",
                        req->r_tid, ceph_mds_op_name(rinfo->head->op));
                if (rinfo->head->is_dentry) {
                        rinfo->head->is_dentry = 0;
                        err = fill_inode(req->r_locked_dir,
                                         &rinfo->diri, rinfo->dirfrag,
                                         session, req->r_request_started, -1,
                                         &req->r_caps_reservation);
                }
                if (rinfo->head->is_target) {
                        rinfo->head->is_target = 0;
                        ininfo = rinfo->targeti.in;
                        vino.ino = le64_to_cpu(ininfo->ino);
                        vino.snap = le64_to_cpu(ininfo->snapid);
                        in = ceph_get_inode(sb, vino);
                        err = fill_inode(in, &rinfo->targeti, NULL,
                                         session, req->r_request_started,
                                         req->r_fmode,
                                         &req->r_caps_reservation);
                        iput(in);
                }
        }
#endif

        if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
                dout("fill_trace reply is empty!\n");
                if (rinfo->head->result == 0 && req->r_locked_dir) {
                        struct ceph_inode_info *ci =
                                ceph_inode(req->r_locked_dir);
                        dout(" clearing %p complete (empty trace)\n",
                             req->r_locked_dir);
                        ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
                        ci->i_release_count++;
                }
                return 0;
        }

        if (rinfo->head->is_dentry) {
                struct inode *dir = req->r_locked_dir;

                err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
                                 session, req->r_request_started, -1,
                                 &req->r_caps_reservation);
                if (err < 0)
                        return err;
        }

        /*
         * ignore null lease/binding on snapdir ENOENT, or else we
         * will have trouble splicing in the virtual snapdir later
         */
        if (rinfo->head->is_dentry && !req->r_aborted &&
            (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
                                               client->mount_args->snapdir_name,
                                               req->r_dentry->d_name.len))) {
                /*
                 * lookup link rename   : null -> possibly existing inode
                 * mknod symlink mkdir  : null -> new inode
                 * unlink               : linked -> null
                 */
                struct inode *dir = req->r_locked_dir;
                struct dentry *dn = req->r_dentry;
                bool have_dir_cap, have_lease;

                BUG_ON(!dn);
                BUG_ON(!dir);
                BUG_ON(dn->d_parent->d_inode != dir);
                BUG_ON(ceph_ino(dir) !=
                       le64_to_cpu(rinfo->diri.in->ino));
                BUG_ON(ceph_snap(dir) !=
                       le64_to_cpu(rinfo->diri.in->snapid));

                /* do we have a lease on the whole dir? */
                have_dir_cap =
                        (le32_to_cpu(rinfo->diri.in->cap.caps) &
                         CEPH_CAP_FILE_SHARED);

                /* do we have a dn lease? */
                have_lease = have_dir_cap ||
                        (le16_to_cpu(rinfo->dlease->mask) &
                         CEPH_LOCK_DN);

                if (!have_lease)
                        dout("fill_trace  no dentry lease or dir cap\n");

                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);
                        dout("fill_trace doing d_move %p -> %p\n",
                             req->r_old_dentry, dn);

                        /* d_move screws up d_subdirs order */
                        ceph_i_clear(dir, CEPH_I_COMPLETE);

                        d_move(req->r_old_dentry, dn);
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
                             req->r_old_dentry->d_name.name,
                             dn, dn->d_name.len, dn->d_name.name);

                        /* ensure target dentry is invalidated, despite
                           rehashing bug in vfs_rename_dir */
                        dn->d_time = jiffies;
                        ceph_dentry(dn)->lease_shared_gen = 0;

                        /* take overwritten dentry's readdir offset */
                        ceph_dentry(req->r_old_dentry)->offset =
                                ceph_dentry(dn)->offset;

                        dn = req->r_old_dentry;  /* use old_dentry */
                        in = dn->d_inode;
                }

                /* null dentry? */
                if (!rinfo->head->is_target) {
                        dout("fill_trace null dentry\n");
                        if (dn->d_inode) {
                                dout("d_delete %p\n", dn);
                                d_delete(dn);
                        } else {
                                dout("d_instantiate %p NULL\n", dn);
                                d_instantiate(dn, NULL);
                                if (have_lease && d_unhashed(dn))
                                        d_rehash(dn);
                                update_dentry_lease(dn, rinfo->dlease,
                                                    session,
                                                    req->r_request_started);
                        }
                        goto done;
                }

                /* attach proper inode */
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
                if (!dn->d_inode) {
                        in = ceph_get_inode(sb, vino);
                        if (IS_ERR(in)) {
                                pr_err("fill_trace bad get_inode "
                                       "%llx.%llx\n", vino.ino, vino.snap);
                                err = PTR_ERR(in);
                                d_delete(dn);
                                goto done;
                        }
                        dn = splice_dentry(dn, in, &have_lease);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
                        }
                        req->r_dentry = dn;  /* may have spliced */
                        ceph_set_dentry_offset(dn);
                        igrab(in);
                } else if (ceph_ino(dn->d_inode) == vino.ino &&
                           ceph_snap(dn->d_inode) == vino.snap) {
                        in = dn->d_inode;
                        igrab(in);
                } else {
                        dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
                             dn, dn->d_inode, ceph_ino(dn->d_inode),
                             ceph_snap(dn->d_inode), vino.ino, vino.snap);
                        have_lease = false;
                        in = NULL;
                }

                if (have_lease)
                        update_dentry_lease(dn, rinfo->dlease, session,
                                            req->r_request_started);
                dout(" final dn %p\n", dn);
                i++;
        } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                   req->r_op == CEPH_MDS_OP_MKSNAP) {
                struct dentry *dn = req->r_dentry;

                /* fill out a snapdir LOOKUPSNAP dentry */
                BUG_ON(!dn);
                BUG_ON(!req->r_locked_dir);
                BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
                ininfo = rinfo->targeti.in;
                vino.ino = le64_to_cpu(ininfo->ino);
                vino.snap = le64_to_cpu(ininfo->snapid);
                in = ceph_get_inode(sb, vino);
                if (IS_ERR(in)) {
                        pr_err("fill_inode get_inode badness %llx.%llx\n",
                               vino.ino, vino.snap);
                        err = PTR_ERR(in);
                        d_delete(dn);
                        goto done;
                }
                dout(" linking snapped dir %p to dn %p\n", in, dn);
                dn = splice_dentry(dn, in, NULL);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
                }
                ceph_set_dentry_offset(dn);
                req->r_dentry = dn;  /* may have spliced */
                igrab(in);
                rinfo->head->is_dentry = 1;  /* fool notrace handlers */
        }

        if (rinfo->head->is_target) {
                vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
                vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

                if (in == NULL || ceph_ino(in) != vino.ino ||
                    ceph_snap(in) != vino.snap) {
                        in = ceph_get_inode(sb, vino);
                        if (IS_ERR(in)) {
                                err = PTR_ERR(in);
                                goto done;
                        }
                }
                req->r_target_inode = in;

                err = fill_inode(in,
                                 &rinfo->targeti, NULL,
                                 session, req->r_request_started,
                                 (le32_to_cpu(rinfo->head->result) == 0) ?
                                 req->r_fmode : -1,
                                 &req->r_caps_reservation);
                if (err < 0) {
                        pr_err("fill_inode badness %p %llx.%llx\n",
                               in, ceph_vinop(in));
                        goto done;
                }
        }

done:
        dout("fill_trace done err=%d\n", err);
        return err;
}
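
/*
 * Readdir positions span directory fragments: di->offset below is
 * built with ceph_make_fpos(frag, off), which (roughly speaking)
 * keeps the frag in the high bits of the loff_t and the position
 * within that frag in the low bits.
 */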

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
                             struct ceph_mds_session *session)
{
        struct dentry *parent = req->r_dentry;
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct qstr dname;
        struct dentry *dn;
        struct inode *in;
        int err = 0, i;
        struct inode *snapdir = NULL;
        struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
        u64 frag = le32_to_cpu(rhead->args.readdir.frag);
        struct ceph_dentry_info *di;

        if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
                snapdir = ceph_get_snapdir(parent->d_inode);
                parent = d_find_alias(snapdir);
                dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
                     rinfo->dir_nr, parent);
        } else {
                dout("readdir_prepopulate %d items under dn %p\n",
                     rinfo->dir_nr, parent);
                if (rinfo->dir_dir)
                        ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
        }

        for (i = 0; i < rinfo->dir_nr; i++) {
                struct ceph_vino vino;

                dname.name = rinfo->dir_dname[i];
                dname.len = rinfo->dir_dname_len[i];
                dname.hash = full_name_hash(dname.name, dname.len);

                vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
                vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
                dn = d_lookup(parent, &dname);
                dout("d_lookup on parent=%p name=%.*s got %p\n",
                     parent, dname.len, dname.name, dn);

                if (!dn) {
                        dn = d_alloc(parent, &dname);
                        dout("d_alloc %p '%.*s' = %p\n", parent,
                             dname.len, dname.name, dn);
                        if (dn == NULL) {
                                dout("d_alloc badness\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        err = ceph_init_dentry(dn);
                        if (err < 0)
                                goto out;
                } else if (dn->d_inode &&
                           (ceph_ino(dn->d_inode) != vino.ino ||
                            ceph_snap(dn->d_inode) != vino.snap)) {
                        dout(" dn %p points to wrong inode %p\n",
                             dn, dn->d_inode);
                        d_delete(dn);
                        dput(dn);
                        goto retry_lookup;
                } else {
                        /* reorder parent's d_subdirs */
                        spin_lock(&dcache_lock);
                        spin_lock(&dn->d_lock);
                        list_move(&dn->d_u.d_child, &parent->d_subdirs);
                        spin_unlock(&dn->d_lock);
                        spin_unlock(&dcache_lock);
                }

                di = dn->d_fsdata;
                di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);

                /* inode */
                if (dn->d_inode) {
                        in = dn->d_inode;
                } else {
                        in = ceph_get_inode(parent->d_sb, vino);
                        if (IS_ERR(in)) {
                                /* ceph_get_inode returns an ERR_PTR,
                                   never NULL */
                                dout("new_inode badness\n");
                                d_delete(dn);
                                dput(dn);
                                err = PTR_ERR(in);
                                goto out;
                        }
                        dn = splice_dentry(dn, in, NULL);
                }

                if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
                               req->r_request_started, -1,
                               &req->r_caps_reservation) < 0) {
                        pr_err("fill_inode badness on %p\n", in);
                        dput(dn);
                        continue;
                }
                update_dentry_lease(dn, rinfo->dir_dlease[i],
                                    req->r_session, req->r_request_started);
                dput(dn);
        }
        req->r_did_prepopulate = true;

out:
        if (snapdir) {
                iput(snapdir);
                dput(parent);
        }
        dout("readdir_prepopulate done\n");
        return err;
}
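
/*
 * ceph_inode_set_size() uses a halfway heuristic: once i_size passes
 * half of the max_size the MDS has granted us, and we have not already
 * reported a size in that range, it returns nonzero so the caller can
 * ask the MDS for a bigger max_size before writers actually hit the
 * limit.
 */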

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret = 0;

        spin_lock(&inode->i_lock);
        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
        inode->i_size = size;
        inode->i_blocks = (size + (1 << 9) - 1) >> 9;

        /* tell the MDS if we are approaching max_size */
        if ((size << 1) >= ci->i_max_size &&
            (ci->i_reported_size << 1) < ci->i_max_size)
                ret = 1;

        spin_unlock(&inode->i_lock);
        return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
        if (queue_work(ceph_inode_to_client(inode)->wb_wq,
                       &ceph_inode(inode)->i_wb_work)) {
                dout("ceph_queue_writeback %p\n", inode);
                igrab(inode);
        } else {
                dout("ceph_queue_writeback %p failed\n", inode);
        }
}

static void ceph_writeback_work(struct work_struct *work)
{
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                  i_wb_work);
        struct inode *inode = &ci->vfs_inode;

        dout("writeback %p\n", inode);
        filemap_fdatawrite(&inode->i_data);
        iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
        if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
                       &ceph_inode(inode)->i_pg_inv_work)) {
                dout("ceph_queue_invalidate %p\n", inode);
                igrab(inode);
        } else {
                dout("ceph_queue_invalidate %p failed\n", inode);
        }
}

/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t next = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
                        int skip_page =
                                (PageDirty(page) || PageWriteback(page));

                        if (!skip_page)
                                skip_page = !trylock_page(page);

                        /*
                         * We really shouldn't be looking at the ->index of an
                         * unlocked page.  But we're not allowed to lock these
                         * pages.  So we rely upon nobody altering the ->index
                         * of this (pinned-by-us) page.
                         */
                        index = page->index;
                        if (index > next)
                                next = index;
                        next++;

                        if (skip_page)
                                continue;

                        generic_error_remove_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                  i_pg_inv_work);
        struct inode *inode = &ci->vfs_inode;
        u32 orig_gen;
        int check = 0;

        spin_lock(&inode->i_lock);
        dout("invalidate_pages %p gen %d revoking %d\n", inode,
             ci->i_rdcache_gen, ci->i_rdcache_revoking);
        if (ci->i_rdcache_gen == 0 ||
            ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen);
                /* nevermind! */
                ci->i_rdcache_revoking = 0;
                spin_unlock(&inode->i_lock);
                goto out;
        }
        orig_gen = ci->i_rdcache_gen;
        spin_unlock(&inode->i_lock);

        ceph_invalidate_nondirty_pages(inode->i_mapping);

        spin_lock(&inode->i_lock);
        if (orig_gen == ci->i_rdcache_gen) {
                dout("invalidate_pages %p gen %d successful\n", inode,
                     ci->i_rdcache_gen);
                ci->i_rdcache_gen = 0;
                ci->i_rdcache_revoking = 0;
                check = 1;
        } else {
                dout("invalidate_pages %p gen %d raced, gen now %d\n",
                     inode, orig_gen, ci->i_rdcache_gen);
        }
        spin_unlock(&inode->i_lock);

        if (check)
                ceph_check_caps(ci, 0, NULL);
out:
        iput(inode);
}


/*
 * called by trunc_wq; take i_mutex ourselves
 *
 * We also truncate in a separate thread as well.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                  i_vmtruncate_work);
        struct inode *inode = &ci->vfs_inode;

        dout("vmtruncate_work %p\n", inode);
        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);
        mutex_unlock(&inode->i_mutex);
        iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
                       &ci->i_vmtruncate_work)) {
                dout("ceph_queue_vmtruncate %p\n", inode);
                igrab(inode);
        } else {
                dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
                     inode, ci->i_truncate_pending);
        }
}

/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 to;
        int wrbuffer_refs, wake = 0;

retry:
        spin_lock(&inode->i_lock);
        if (ci->i_truncate_pending == 0) {
                dout("__do_pending_vmtruncate %p none pending\n", inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        /*
         * make sure any dirty snapped pages are flushed before we
         * possibly truncate them.. so write AND block!
         */
        if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
                dout("__do_pending_vmtruncate %p flushing snaps first\n",
                     inode);
                spin_unlock(&inode->i_lock);
                filemap_write_and_wait_range(&inode->i_data, 0,
                                             inode->i_sb->s_maxbytes);
                goto retry;
        }

        to = ci->i_truncate_size;
        wrbuffer_refs = ci->i_wrbuffer_ref;
        dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
             ci->i_truncate_pending, to);
        spin_unlock(&inode->i_lock);

        truncate_inode_pages(inode->i_mapping, to);

        spin_lock(&inode->i_lock);
        ci->i_truncate_pending--;
        if (ci->i_truncate_pending == 0)
                wake = 1;
        spin_unlock(&inode->i_lock);

        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
        if (wake)
                wake_up(&ci->i_cap_wq);
}

/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
        nd_set_link(nd, ci->i_symlink);
        return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
        .readlink = generic_readlink,
        .follow_link = ceph_sym_follow_link,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct inode *parent_inode = dentry->d_parent->d_inode;
        const unsigned int ia_valid = attr->ia_valid;
        struct ceph_mds_request *req;
        struct ceph_mds_client *mdsc = &ceph_client(dentry->d_sb)->mdsc;
        int issued;
        int release = 0, dirtied = 0;
        int mask = 0;
        int err = 0;

        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;

        __ceph_do_pending_vmtruncate(inode);

        err = inode_change_ok(inode, attr);
        if (err != 0)
                return err;

        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
                                       USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);

        spin_lock(&inode->i_lock);
        issued = __ceph_caps_issued(ci, NULL);
        dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

        if (ia_valid & ATTR_UID) {
                dout("setattr %p uid %d -> %d\n", inode,
                     inode->i_uid, attr->ia_uid);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_uid = attr->ia_uid;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
                           attr->ia_uid != inode->i_uid) {
                        req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
                        mask |= CEPH_SETATTR_UID;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }
        if (ia_valid & ATTR_GID) {
                dout("setattr %p gid %d -> %d\n", inode,
                     inode->i_gid, attr->ia_gid);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_gid = attr->ia_gid;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
                           attr->ia_gid != inode->i_gid) {
                        req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
                        mask |= CEPH_SETATTR_GID;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }
        if (ia_valid & ATTR_MODE) {
                dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
                     attr->ia_mode);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_mode = attr->ia_mode;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
                           attr->ia_mode != inode->i_mode) {
                        req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
                        mask |= CEPH_SETATTR_MODE;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }

        if (ia_valid & ATTR_ATIME) {
                dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
                     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
                     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
                if (issued & CEPH_CAP_FILE_EXCL) {
                        ci->i_time_warp_seq++;
                        inode->i_atime = attr->ia_atime;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_WR) &&
                           timespec_compare(&inode->i_atime,
                                            &attr->ia_atime) < 0) {
                        inode->i_atime = attr->ia_atime;
                        dirtied |= CEPH_CAP_FILE_WR;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
                           !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
                        ceph_encode_timespec(&req->r_args.setattr.atime,
                                             &attr->ia_atime);
                        mask |= CEPH_SETATTR_ATIME;
                        release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
                                CEPH_CAP_FILE_WR;
                }
        }
        if (ia_valid & ATTR_MTIME) {
                dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
                     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
                     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
                if (issued & CEPH_CAP_FILE_EXCL) {
                        ci->i_time_warp_seq++;
                        inode->i_mtime = attr->ia_mtime;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_WR) &&
                           timespec_compare(&inode->i_mtime,
                                            &attr->ia_mtime) < 0) {
                        inode->i_mtime = attr->ia_mtime;
                        dirtied |= CEPH_CAP_FILE_WR;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
                           !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
                        ceph_encode_timespec(&req->r_args.setattr.mtime,
                                             &attr->ia_mtime);
                        mask |= CEPH_SETATTR_MTIME;
                        release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
                                CEPH_CAP_FILE_WR;
                }
        }
        if (ia_valid & ATTR_SIZE) {
                dout("setattr %p size %lld -> %lld\n", inode,
                     inode->i_size, attr->ia_size);
                if (attr->ia_size > inode->i_sb->s_maxbytes) {
                        err = -EINVAL;
                        goto out;
                }
                if ((issued & CEPH_CAP_FILE_EXCL) &&
                    attr->ia_size > inode->i_size) {
                        inode->i_size = attr->ia_size;
                        inode->i_blocks =
                                (attr->ia_size + (1 << 9) - 1) >> 9;
                        inode->i_ctime = attr->ia_ctime;
                        ci->i_reported_size = attr->ia_size;
                        dirtied |= CEPH_CAP_FILE_EXCL;
                } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
                           attr->ia_size != inode->i_size) {
                        req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
                        req->r_args.setattr.old_size =
                                cpu_to_le64(inode->i_size);
                        mask |= CEPH_SETATTR_SIZE;
                        release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
                                CEPH_CAP_FILE_WR;
                }
        }
"ctime only" : "ignored"); 1659 inode->i_ctime = attr->ia_ctime; 1660 if (only) { 1661 /* 1662 * if kernel wants to dirty ctime but nothing else, 1663 * we need to choose a cap to dirty under, or do 1664 * a almost-no-op setattr 1665 */ 1666 if (issued & CEPH_CAP_AUTH_EXCL) 1667 dirtied |= CEPH_CAP_AUTH_EXCL; 1668 else if (issued & CEPH_CAP_FILE_EXCL) 1669 dirtied |= CEPH_CAP_FILE_EXCL; 1670 else if (issued & CEPH_CAP_XATTR_EXCL) 1671 dirtied |= CEPH_CAP_XATTR_EXCL; 1672 else 1673 mask |= CEPH_SETATTR_CTIME; 1674 } 1675 } 1676 if (ia_valid & ATTR_FILE) 1677 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 1678 1679 if (dirtied) { 1680 __ceph_mark_dirty_caps(ci, dirtied); 1681 inode->i_ctime = CURRENT_TIME; 1682 } 1683 1684 release &= issued; 1685 spin_unlock(&inode->i_lock); 1686 1687 if (mask) { 1688 req->r_inode = igrab(inode); 1689 req->r_inode_drop = release; 1690 req->r_args.setattr.mask = cpu_to_le32(mask); 1691 req->r_num_caps = 1; 1692 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 1693 } 1694 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 1695 ceph_cap_string(dirtied), mask); 1696 1697 ceph_mdsc_put_request(req); 1698 __ceph_do_pending_vmtruncate(inode); 1699 return err; 1700 out: 1701 spin_unlock(&inode->i_lock); 1702 ceph_mdsc_put_request(req); 1703 return err; 1704 } 1705 1706 /* 1707 * Verify that we have a lease on the given mask. If not, 1708 * do a getattr against an mds. 1709 */ 1710 int ceph_do_getattr(struct inode *inode, int mask) 1711 { 1712 struct ceph_client *client = ceph_sb_to_client(inode->i_sb); 1713 struct ceph_mds_client *mdsc = &client->mdsc; 1714 struct ceph_mds_request *req; 1715 int err; 1716 1717 if (ceph_snap(inode) == CEPH_SNAPDIR) { 1718 dout("do_getattr inode %p SNAPDIR\n", inode); 1719 return 0; 1720 } 1721 1722 dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask)); 1723 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 1724 return 0; 1725 1726 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1727 if (IS_ERR(req)) 1728 return PTR_ERR(req); 1729 req->r_inode = igrab(inode); 1730 req->r_num_caps = 1; 1731 req->r_args.getattr.mask = cpu_to_le32(mask); 1732 err = ceph_mdsc_do_request(mdsc, NULL, req); 1733 ceph_mdsc_put_request(req); 1734 dout("do_getattr result=%d\n", err); 1735 return err; 1736 } 1737 1738 1739 /* 1740 * Check inode permissions. We verify we have a valid value for 1741 * the AUTH cap, then call the generic handler. 1742 */ 1743 int ceph_permission(struct inode *inode, int mask) 1744 { 1745 int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1746 1747 if (!err) 1748 err = generic_permission(inode, mask, NULL); 1749 return err; 1750 } 1751 1752 /* 1753 * Get all attributes. Hopefully somedata we'll have a statlite() 1754 * and can limit the fields we require to be accurate. 1755 */ 1756 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 1757 struct kstat *stat) 1758 { 1759 struct inode *inode = dentry->d_inode; 1760 struct ceph_inode_info *ci = ceph_inode(inode); 1761 int err; 1762 1763 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL); 1764 if (!err) { 1765 generic_fillattr(inode, stat); 1766 stat->ino = inode->i_ino; 1767 if (ceph_snap(inode) != CEPH_NOSNAP) 1768 stat->dev = ceph_snap(inode); 1769 else 1770 stat->dev = 0; 1771 if (S_ISDIR(inode->i_mode)) { 1772 stat->size = ci->i_rbytes; 1773 stat->blocks = 0; 1774 stat->blksize = 65536; 1775 } 1776 } 1777 return err; 1778 } 1779