1 #include "ceph_debug.h" 2 3 #include <linux/module.h> 4 #include <linux/fs.h> 5 #include <linux/smp_lock.h> 6 #include <linux/slab.h> 7 #include <linux/string.h> 8 #include <linux/uaccess.h> 9 #include <linux/kernel.h> 10 #include <linux/namei.h> 11 #include <linux/writeback.h> 12 #include <linux/vmalloc.h> 13 14 #include "super.h" 15 #include "decode.h" 16 17 /* 18 * Ceph inode operations 19 * 20 * Implement basic inode helpers (get, alloc) and inode ops (getattr, 21 * setattr, etc.), xattr helpers, and helpers for assimilating 22 * metadata returned by the MDS into our cache. 23 * 24 * Also define helpers for doing asynchronous writeback, invalidation, 25 * and truncation for the benefit of those who can't afford to block 26 * (typically because they are in the message handler path). 27 */ 28 29 static const struct inode_operations ceph_symlink_iops; 30 31 static void ceph_inode_invalidate_pages(struct work_struct *work); 32 33 /* 34 * find or create an inode, given the ceph ino number 35 */ 36 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) 37 { 38 struct inode *inode; 39 ino_t t = ceph_vino_to_ino(vino); 40 41 inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino); 42 if (inode == NULL) 43 return ERR_PTR(-ENOMEM); 44 if (inode->i_state & I_NEW) { 45 dout("get_inode created new inode %p %llx.%llx ino %llx\n", 46 inode, ceph_vinop(inode), (u64)inode->i_ino); 47 unlock_new_inode(inode); 48 } 49 50 dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino, 51 vino.snap, inode); 52 return inode; 53 } 54 55 /* 56 * get/constuct snapdir inode for a given directory 57 */ 58 struct inode *ceph_get_snapdir(struct inode *parent) 59 { 60 struct ceph_vino vino = { 61 .ino = ceph_ino(parent), 62 .snap = CEPH_SNAPDIR, 63 }; 64 struct inode *inode = ceph_get_inode(parent->i_sb, vino); 65 66 BUG_ON(!S_ISDIR(parent->i_mode)); 67 if (IS_ERR(inode)) 68 return ERR_PTR(PTR_ERR(inode)); 69 inode->i_mode = parent->i_mode; 70 inode->i_uid = parent->i_uid; 71 inode->i_gid = parent->i_gid; 72 inode->i_op = &ceph_dir_iops; 73 inode->i_fop = &ceph_dir_fops; 74 ceph_inode(inode)->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ 75 return inode; 76 } 77 78 const struct inode_operations ceph_file_iops = { 79 .permission = ceph_permission, 80 .setattr = ceph_setattr, 81 .getattr = ceph_getattr, 82 .setxattr = ceph_setxattr, 83 .getxattr = ceph_getxattr, 84 .listxattr = ceph_listxattr, 85 .removexattr = ceph_removexattr, 86 }; 87 88 89 /* 90 * We use a 'frag tree' to keep track of the MDS's directory fragments 91 * for a given inode (usually there is just a single fragment). We 92 * need to know when a child frag is delegated to a new MDS, or when 93 * it is flagged as replicated, so we can direct our requests 94 * accordingly. 95 */ 96 97 /* 98 * find/create a frag in the tree 99 */ 100 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, 101 u32 f) 102 { 103 struct rb_node **p; 104 struct rb_node *parent = NULL; 105 struct ceph_inode_frag *frag; 106 int c; 107 108 p = &ci->i_fragtree.rb_node; 109 while (*p) { 110 parent = *p; 111 frag = rb_entry(parent, struct ceph_inode_frag, node); 112 c = ceph_frag_compare(f, frag->frag); 113 if (c < 0) 114 p = &(*p)->rb_left; 115 else if (c > 0) 116 p = &(*p)->rb_right; 117 else 118 return frag; 119 } 120 121 frag = kmalloc(sizeof(*frag), GFP_NOFS); 122 if (!frag) { 123 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx " 124 "frag %x\n", &ci->vfs_inode, 125 ceph_vinop(&ci->vfs_inode), f); 126 return ERR_PTR(-ENOMEM); 127 } 128 frag->frag = f; 129 frag->split_by = 0; 130 frag->mds = -1; 131 frag->ndist = 0; 132 133 rb_link_node(&frag->node, parent, p); 134 rb_insert_color(&frag->node, &ci->i_fragtree); 135 136 dout("get_or_create_frag added %llx.%llx frag %x\n", 137 ceph_vinop(&ci->vfs_inode), f); 138 return frag; 139 } 140 141 /* 142 * find a specific frag @f 143 */ 144 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f) 145 { 146 struct rb_node *n = ci->i_fragtree.rb_node; 147 148 while (n) { 149 struct ceph_inode_frag *frag = 150 rb_entry(n, struct ceph_inode_frag, node); 151 int c = ceph_frag_compare(f, frag->frag); 152 if (c < 0) 153 n = n->rb_left; 154 else if (c > 0) 155 n = n->rb_right; 156 else 157 return frag; 158 } 159 return NULL; 160 } 161 162 /* 163 * Choose frag containing the given value @v. If @pfrag is 164 * specified, copy the frag delegation info to the caller if 165 * it is present. 166 */ 167 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, 168 struct ceph_inode_frag *pfrag, 169 int *found) 170 { 171 u32 t = ceph_frag_make(0, 0); 172 struct ceph_inode_frag *frag; 173 unsigned nway, i; 174 u32 n; 175 176 if (found) 177 *found = 0; 178 179 mutex_lock(&ci->i_fragtree_mutex); 180 while (1) { 181 WARN_ON(!ceph_frag_contains_value(t, v)); 182 frag = __ceph_find_frag(ci, t); 183 if (!frag) 184 break; /* t is a leaf */ 185 if (frag->split_by == 0) { 186 if (pfrag) 187 memcpy(pfrag, frag, sizeof(*pfrag)); 188 if (found) 189 *found = 1; 190 break; 191 } 192 193 /* choose child */ 194 nway = 1 << frag->split_by; 195 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t, 196 frag->split_by, nway); 197 for (i = 0; i < nway; i++) { 198 n = ceph_frag_make_child(t, frag->split_by, i); 199 if (ceph_frag_contains_value(n, v)) { 200 t = n; 201 break; 202 } 203 } 204 BUG_ON(i == nway); 205 } 206 dout("choose_frag(%x) = %x\n", v, t); 207 208 mutex_unlock(&ci->i_fragtree_mutex); 209 return t; 210 } 211 212 /* 213 * Process dirfrag (delegation) info from the mds. Include leaf 214 * fragment in tree ONLY if ndist > 0. Otherwise, only 215 * branches/splits are included in i_fragtree) 216 */ 217 static int ceph_fill_dirfrag(struct inode *inode, 218 struct ceph_mds_reply_dirfrag *dirinfo) 219 { 220 struct ceph_inode_info *ci = ceph_inode(inode); 221 struct ceph_inode_frag *frag; 222 u32 id = le32_to_cpu(dirinfo->frag); 223 int mds = le32_to_cpu(dirinfo->auth); 224 int ndist = le32_to_cpu(dirinfo->ndist); 225 int i; 226 int err = 0; 227 228 mutex_lock(&ci->i_fragtree_mutex); 229 if (ndist == 0) { 230 /* no delegation info needed. */ 231 frag = __ceph_find_frag(ci, id); 232 if (!frag) 233 goto out; 234 if (frag->split_by == 0) { 235 /* tree leaf, remove */ 236 dout("fill_dirfrag removed %llx.%llx frag %x" 237 " (no ref)\n", ceph_vinop(inode), id); 238 rb_erase(&frag->node, &ci->i_fragtree); 239 kfree(frag); 240 } else { 241 /* tree branch, keep and clear */ 242 dout("fill_dirfrag cleared %llx.%llx frag %x" 243 " referral\n", ceph_vinop(inode), id); 244 frag->mds = -1; 245 frag->ndist = 0; 246 } 247 goto out; 248 } 249 250 251 /* find/add this frag to store mds delegation info */ 252 frag = __get_or_create_frag(ci, id); 253 if (IS_ERR(frag)) { 254 /* this is not the end of the world; we can continue 255 with bad/inaccurate delegation info */ 256 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n", 257 ceph_vinop(inode), le32_to_cpu(dirinfo->frag)); 258 err = -ENOMEM; 259 goto out; 260 } 261 262 frag->mds = mds; 263 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP); 264 for (i = 0; i < frag->ndist; i++) 265 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]); 266 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n", 267 ceph_vinop(inode), frag->frag, frag->ndist); 268 269 out: 270 mutex_unlock(&ci->i_fragtree_mutex); 271 return err; 272 } 273 274 275 /* 276 * initialize a newly allocated inode. 277 */ 278 struct inode *ceph_alloc_inode(struct super_block *sb) 279 { 280 struct ceph_inode_info *ci; 281 int i; 282 283 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS); 284 if (!ci) 285 return NULL; 286 287 dout("alloc_inode %p\n", &ci->vfs_inode); 288 289 ci->i_version = 0; 290 ci->i_time_warp_seq = 0; 291 ci->i_ceph_flags = 0; 292 ci->i_release_count = 0; 293 ci->i_symlink = NULL; 294 295 ci->i_fragtree = RB_ROOT; 296 mutex_init(&ci->i_fragtree_mutex); 297 298 ci->i_xattrs.blob = NULL; 299 ci->i_xattrs.prealloc_blob = NULL; 300 ci->i_xattrs.dirty = false; 301 ci->i_xattrs.index = RB_ROOT; 302 ci->i_xattrs.count = 0; 303 ci->i_xattrs.names_size = 0; 304 ci->i_xattrs.vals_size = 0; 305 ci->i_xattrs.version = 0; 306 ci->i_xattrs.index_version = 0; 307 308 ci->i_caps = RB_ROOT; 309 ci->i_auth_cap = NULL; 310 ci->i_dirty_caps = 0; 311 ci->i_flushing_caps = 0; 312 INIT_LIST_HEAD(&ci->i_dirty_item); 313 INIT_LIST_HEAD(&ci->i_flushing_item); 314 ci->i_cap_flush_seq = 0; 315 ci->i_cap_flush_last_tid = 0; 316 memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid)); 317 init_waitqueue_head(&ci->i_cap_wq); 318 ci->i_hold_caps_min = 0; 319 ci->i_hold_caps_max = 0; 320 INIT_LIST_HEAD(&ci->i_cap_delay_list); 321 ci->i_cap_exporting_mds = 0; 322 ci->i_cap_exporting_mseq = 0; 323 ci->i_cap_exporting_issued = 0; 324 INIT_LIST_HEAD(&ci->i_cap_snaps); 325 ci->i_head_snapc = NULL; 326 ci->i_snap_caps = 0; 327 328 for (i = 0; i < CEPH_FILE_MODE_NUM; i++) 329 ci->i_nr_by_mode[i] = 0; 330 331 ci->i_truncate_seq = 0; 332 ci->i_truncate_size = 0; 333 ci->i_truncate_pending = 0; 334 335 ci->i_max_size = 0; 336 ci->i_reported_size = 0; 337 ci->i_wanted_max_size = 0; 338 ci->i_requested_max_size = 0; 339 340 ci->i_pin_ref = 0; 341 ci->i_rd_ref = 0; 342 ci->i_rdcache_ref = 0; 343 ci->i_wr_ref = 0; 344 ci->i_wrbuffer_ref = 0; 345 ci->i_wrbuffer_ref_head = 0; 346 ci->i_shared_gen = 0; 347 ci->i_rdcache_gen = 0; 348 ci->i_rdcache_revoking = 0; 349 350 INIT_LIST_HEAD(&ci->i_unsafe_writes); 351 INIT_LIST_HEAD(&ci->i_unsafe_dirops); 352 spin_lock_init(&ci->i_unsafe_lock); 353 354 ci->i_snap_realm = NULL; 355 INIT_LIST_HEAD(&ci->i_snap_realm_item); 356 INIT_LIST_HEAD(&ci->i_snap_flush_item); 357 358 INIT_WORK(&ci->i_wb_work, ceph_inode_writeback); 359 INIT_WORK(&ci->i_pg_inv_work, ceph_inode_invalidate_pages); 360 361 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); 362 363 return &ci->vfs_inode; 364 } 365 366 void ceph_destroy_inode(struct inode *inode) 367 { 368 struct ceph_inode_info *ci = ceph_inode(inode); 369 struct ceph_inode_frag *frag; 370 struct rb_node *n; 371 372 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode)); 373 374 ceph_queue_caps_release(inode); 375 376 kfree(ci->i_symlink); 377 while ((n = rb_first(&ci->i_fragtree)) != NULL) { 378 frag = rb_entry(n, struct ceph_inode_frag, node); 379 rb_erase(n, &ci->i_fragtree); 380 kfree(frag); 381 } 382 383 __ceph_destroy_xattrs(ci); 384 ceph_buffer_put(ci->i_xattrs.blob); 385 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 386 387 kmem_cache_free(ceph_inode_cachep, ci); 388 } 389 390 391 /* 392 * Helpers to fill in size, ctime, mtime, and atime. We have to be 393 * careful because either the client or MDS may have more up to date 394 * info, depending on which capabilities are held, and whether 395 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime 396 * and size are monotonically increasing, except when utimes() or 397 * truncate() increments the corresponding _seq values.) 398 */ 399 int ceph_fill_file_size(struct inode *inode, int issued, 400 u32 truncate_seq, u64 truncate_size, u64 size) 401 { 402 struct ceph_inode_info *ci = ceph_inode(inode); 403 int queue_trunc = 0; 404 405 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || 406 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { 407 dout("size %lld -> %llu\n", inode->i_size, size); 408 inode->i_size = size; 409 inode->i_blocks = (size + (1<<9) - 1) >> 9; 410 ci->i_reported_size = size; 411 if (truncate_seq != ci->i_truncate_seq) { 412 dout("truncate_seq %u -> %u\n", 413 ci->i_truncate_seq, truncate_seq); 414 ci->i_truncate_seq = truncate_seq; 415 if (issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD| 416 CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER| 417 CEPH_CAP_FILE_EXCL)) { 418 ci->i_truncate_pending++; 419 queue_trunc = 1; 420 } 421 } 422 } 423 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 && 424 ci->i_truncate_size != truncate_size) { 425 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size, 426 truncate_size); 427 ci->i_truncate_size = truncate_size; 428 } 429 return queue_trunc; 430 } 431 432 void ceph_fill_file_time(struct inode *inode, int issued, 433 u64 time_warp_seq, struct timespec *ctime, 434 struct timespec *mtime, struct timespec *atime) 435 { 436 struct ceph_inode_info *ci = ceph_inode(inode); 437 int warn = 0; 438 439 if (issued & (CEPH_CAP_FILE_EXCL| 440 CEPH_CAP_FILE_WR| 441 CEPH_CAP_FILE_BUFFER)) { 442 if (timespec_compare(ctime, &inode->i_ctime) > 0) { 443 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", 444 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 445 ctime->tv_sec, ctime->tv_nsec); 446 inode->i_ctime = *ctime; 447 } 448 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 449 /* the MDS did a utimes() */ 450 dout("mtime %ld.%09ld -> %ld.%09ld " 451 "tw %d -> %d\n", 452 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 453 mtime->tv_sec, mtime->tv_nsec, 454 ci->i_time_warp_seq, (int)time_warp_seq); 455 456 inode->i_mtime = *mtime; 457 inode->i_atime = *atime; 458 ci->i_time_warp_seq = time_warp_seq; 459 } else if (time_warp_seq == ci->i_time_warp_seq) { 460 /* nobody did utimes(); take the max */ 461 if (timespec_compare(mtime, &inode->i_mtime) > 0) { 462 dout("mtime %ld.%09ld -> %ld.%09ld inc\n", 463 inode->i_mtime.tv_sec, 464 inode->i_mtime.tv_nsec, 465 mtime->tv_sec, mtime->tv_nsec); 466 inode->i_mtime = *mtime; 467 } 468 if (timespec_compare(atime, &inode->i_atime) > 0) { 469 dout("atime %ld.%09ld -> %ld.%09ld inc\n", 470 inode->i_atime.tv_sec, 471 inode->i_atime.tv_nsec, 472 atime->tv_sec, atime->tv_nsec); 473 inode->i_atime = *atime; 474 } 475 } else if (issued & CEPH_CAP_FILE_EXCL) { 476 /* we did a utimes(); ignore mds values */ 477 } else { 478 warn = 1; 479 } 480 } else { 481 /* we have no write caps; whatever the MDS says is true */ 482 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { 483 inode->i_ctime = *ctime; 484 inode->i_mtime = *mtime; 485 inode->i_atime = *atime; 486 ci->i_time_warp_seq = time_warp_seq; 487 } else { 488 warn = 1; 489 } 490 } 491 if (warn) /* time_warp_seq shouldn't go backwards */ 492 dout("%p mds time_warp_seq %llu < %u\n", 493 inode, time_warp_seq, ci->i_time_warp_seq); 494 } 495 496 /* 497 * Populate an inode based on info from mds. May be called on new or 498 * existing inodes. 499 */ 500 static int fill_inode(struct inode *inode, 501 struct ceph_mds_reply_info_in *iinfo, 502 struct ceph_mds_reply_dirfrag *dirinfo, 503 struct ceph_mds_session *session, 504 unsigned long ttl_from, int cap_fmode, 505 struct ceph_cap_reservation *caps_reservation) 506 { 507 struct ceph_mds_reply_inode *info = iinfo->in; 508 struct ceph_inode_info *ci = ceph_inode(inode); 509 int i; 510 int issued, implemented; 511 struct timespec mtime, atime, ctime; 512 u32 nsplits; 513 struct ceph_buffer *xattr_blob = NULL; 514 int err = 0; 515 int queue_trunc = 0; 516 517 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n", 518 inode, ceph_vinop(inode), le64_to_cpu(info->version), 519 ci->i_version); 520 521 /* 522 * prealloc xattr data, if it looks like we'll need it. only 523 * if len > 4 (meaning there are actually xattrs; the first 4 524 * bytes are the xattr count). 525 */ 526 if (iinfo->xattr_len > 4) { 527 xattr_blob = ceph_buffer_new_alloc(iinfo->xattr_len, GFP_NOFS); 528 if (!xattr_blob) 529 pr_err("fill_inode ENOMEM xattr blob %d bytes\n", 530 iinfo->xattr_len); 531 } 532 533 spin_lock(&inode->i_lock); 534 535 /* 536 * provided version will be odd if inode value is projected, 537 * even if stable. skip the update if we have a newer info 538 * (e.g., due to inode info racing form multiple MDSs), or if 539 * we are getting projected (unstable) inode info. 540 */ 541 if (le64_to_cpu(info->version) > 0 && 542 (ci->i_version & ~1) > le64_to_cpu(info->version)) 543 goto no_change; 544 545 issued = __ceph_caps_issued(ci, &implemented); 546 issued |= implemented | __ceph_caps_dirty(ci); 547 548 /* update inode */ 549 ci->i_version = le64_to_cpu(info->version); 550 inode->i_version++; 551 inode->i_rdev = le32_to_cpu(info->rdev); 552 553 if ((issued & CEPH_CAP_AUTH_EXCL) == 0) { 554 inode->i_mode = le32_to_cpu(info->mode); 555 inode->i_uid = le32_to_cpu(info->uid); 556 inode->i_gid = le32_to_cpu(info->gid); 557 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 558 inode->i_uid, inode->i_gid); 559 } 560 561 if ((issued & CEPH_CAP_LINK_EXCL) == 0) 562 inode->i_nlink = le32_to_cpu(info->nlink); 563 564 /* be careful with mtime, atime, size */ 565 ceph_decode_timespec(&atime, &info->atime); 566 ceph_decode_timespec(&mtime, &info->mtime); 567 ceph_decode_timespec(&ctime, &info->ctime); 568 queue_trunc = ceph_fill_file_size(inode, issued, 569 le32_to_cpu(info->truncate_seq), 570 le64_to_cpu(info->truncate_size), 571 le64_to_cpu(info->size)); 572 ceph_fill_file_time(inode, issued, 573 le32_to_cpu(info->time_warp_seq), 574 &ctime, &mtime, &atime); 575 576 ci->i_max_size = le64_to_cpu(info->max_size); 577 ci->i_layout = info->layout; 578 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 579 580 /* xattrs */ 581 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */ 582 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && 583 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { 584 if (ci->i_xattrs.blob) 585 ceph_buffer_put(ci->i_xattrs.blob); 586 ci->i_xattrs.blob = xattr_blob; 587 if (xattr_blob) 588 memcpy(ci->i_xattrs.blob->vec.iov_base, 589 iinfo->xattr_data, iinfo->xattr_len); 590 ci->i_xattrs.version = le64_to_cpu(info->xattr_version); 591 } 592 593 inode->i_mapping->a_ops = &ceph_aops; 594 inode->i_mapping->backing_dev_info = 595 &ceph_client(inode->i_sb)->backing_dev_info; 596 597 switch (inode->i_mode & S_IFMT) { 598 case S_IFIFO: 599 case S_IFBLK: 600 case S_IFCHR: 601 case S_IFSOCK: 602 init_special_inode(inode, inode->i_mode, inode->i_rdev); 603 inode->i_op = &ceph_file_iops; 604 break; 605 case S_IFREG: 606 inode->i_op = &ceph_file_iops; 607 inode->i_fop = &ceph_file_fops; 608 break; 609 case S_IFLNK: 610 inode->i_op = &ceph_symlink_iops; 611 if (!ci->i_symlink) { 612 int symlen = iinfo->symlink_len; 613 char *sym; 614 615 BUG_ON(symlen != inode->i_size); 616 spin_unlock(&inode->i_lock); 617 618 err = -ENOMEM; 619 sym = kmalloc(symlen+1, GFP_NOFS); 620 if (!sym) 621 goto out; 622 memcpy(sym, iinfo->symlink, symlen); 623 sym[symlen] = 0; 624 625 spin_lock(&inode->i_lock); 626 if (!ci->i_symlink) 627 ci->i_symlink = sym; 628 else 629 kfree(sym); /* lost a race */ 630 } 631 break; 632 case S_IFDIR: 633 inode->i_op = &ceph_dir_iops; 634 inode->i_fop = &ceph_dir_fops; 635 636 ci->i_files = le64_to_cpu(info->files); 637 ci->i_subdirs = le64_to_cpu(info->subdirs); 638 ci->i_rbytes = le64_to_cpu(info->rbytes); 639 ci->i_rfiles = le64_to_cpu(info->rfiles); 640 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs); 641 ceph_decode_timespec(&ci->i_rctime, &info->rctime); 642 643 /* set dir completion flag? */ 644 if (ci->i_files == 0 && ci->i_subdirs == 0 && 645 ceph_snap(inode) == CEPH_NOSNAP && 646 (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED)) { 647 dout(" marking %p complete (empty)\n", inode); 648 ci->i_ceph_flags |= CEPH_I_COMPLETE; 649 ci->i_max_offset = 2; 650 } 651 652 /* it may be better to set st_size in getattr instead? */ 653 if (ceph_test_opt(ceph_client(inode->i_sb), RBYTES)) 654 inode->i_size = ci->i_rbytes; 655 break; 656 default: 657 pr_err("fill_inode %llx.%llx BAD mode 0%o\n", 658 ceph_vinop(inode), inode->i_mode); 659 } 660 661 no_change: 662 spin_unlock(&inode->i_lock); 663 664 /* queue truncate if we saw i_size decrease */ 665 if (queue_trunc) 666 if (queue_work(ceph_client(inode->i_sb)->trunc_wq, 667 &ci->i_vmtruncate_work)) 668 igrab(inode); 669 670 /* populate frag tree */ 671 /* FIXME: move me up, if/when version reflects fragtree changes */ 672 nsplits = le32_to_cpu(info->fragtree.nsplits); 673 mutex_lock(&ci->i_fragtree_mutex); 674 for (i = 0; i < nsplits; i++) { 675 u32 id = le32_to_cpu(info->fragtree.splits[i].frag); 676 struct ceph_inode_frag *frag = __get_or_create_frag(ci, id); 677 678 if (IS_ERR(frag)) 679 continue; 680 frag->split_by = le32_to_cpu(info->fragtree.splits[i].by); 681 dout(" frag %x split by %d\n", frag->frag, frag->split_by); 682 } 683 mutex_unlock(&ci->i_fragtree_mutex); 684 685 /* were we issued a capability? */ 686 if (info->cap.caps) { 687 if (ceph_snap(inode) == CEPH_NOSNAP) { 688 ceph_add_cap(inode, session, 689 le64_to_cpu(info->cap.cap_id), 690 cap_fmode, 691 le32_to_cpu(info->cap.caps), 692 le32_to_cpu(info->cap.wanted), 693 le32_to_cpu(info->cap.seq), 694 le32_to_cpu(info->cap.mseq), 695 le64_to_cpu(info->cap.realm), 696 info->cap.flags, 697 caps_reservation); 698 } else { 699 spin_lock(&inode->i_lock); 700 dout(" %p got snap_caps %s\n", inode, 701 ceph_cap_string(le32_to_cpu(info->cap.caps))); 702 ci->i_snap_caps |= le32_to_cpu(info->cap.caps); 703 if (cap_fmode >= 0) 704 __ceph_get_fmode(ci, cap_fmode); 705 spin_unlock(&inode->i_lock); 706 } 707 } 708 709 /* update delegation info? */ 710 if (dirinfo) 711 ceph_fill_dirfrag(inode, dirinfo); 712 713 err = 0; 714 715 out: 716 ceph_buffer_put(xattr_blob); 717 return err; 718 } 719 720 /* 721 * caller should hold session s_mutex. 722 */ 723 static void update_dentry_lease(struct dentry *dentry, 724 struct ceph_mds_reply_lease *lease, 725 struct ceph_mds_session *session, 726 unsigned long from_time) 727 { 728 struct ceph_dentry_info *di = ceph_dentry(dentry); 729 long unsigned duration = le32_to_cpu(lease->duration_ms); 730 long unsigned ttl = from_time + (duration * HZ) / 1000; 731 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000; 732 struct inode *dir; 733 734 /* only track leases on regular dentries */ 735 if (dentry->d_op != &ceph_dentry_ops) 736 return; 737 738 spin_lock(&dentry->d_lock); 739 dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n", 740 dentry, le16_to_cpu(lease->mask), duration, ttl); 741 742 /* make lease_rdcache_gen match directory */ 743 dir = dentry->d_parent->d_inode; 744 di->lease_shared_gen = ceph_inode(dir)->i_shared_gen; 745 746 if (lease->mask == 0) 747 goto out_unlock; 748 749 if (di->lease_gen == session->s_cap_gen && 750 time_before(ttl, dentry->d_time)) 751 goto out_unlock; /* we already have a newer lease. */ 752 753 if (di->lease_session && di->lease_session != session) 754 goto out_unlock; 755 756 ceph_dentry_lru_touch(dentry); 757 758 if (!di->lease_session) 759 di->lease_session = ceph_get_mds_session(session); 760 di->lease_gen = session->s_cap_gen; 761 di->lease_seq = le32_to_cpu(lease->seq); 762 di->lease_renew_after = half_ttl; 763 di->lease_renew_from = 0; 764 dentry->d_time = ttl; 765 out_unlock: 766 spin_unlock(&dentry->d_lock); 767 return; 768 } 769 770 /* 771 * splice a dentry to an inode. 772 * caller must hold directory i_mutex for this to be safe. 773 * 774 * we will only rehash the resulting dentry if @prehash is 775 * true; @prehash will be set to false (for the benefit of 776 * the caller) if we fail. 777 */ 778 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in, 779 bool *prehash) 780 { 781 struct dentry *realdn; 782 783 /* dn must be unhashed */ 784 if (!d_unhashed(dn)) 785 d_drop(dn); 786 realdn = d_materialise_unique(dn, in); 787 if (IS_ERR(realdn)) { 788 pr_err("splice_dentry error %p inode %p ino %llx.%llx\n", 789 dn, in, ceph_vinop(in)); 790 if (prehash) 791 *prehash = false; /* don't rehash on error */ 792 dn = realdn; /* note realdn contains the error */ 793 goto out; 794 } else if (realdn) { 795 dout("dn %p (%d) spliced with %p (%d) " 796 "inode %p ino %llx.%llx\n", 797 dn, atomic_read(&dn->d_count), 798 realdn, atomic_read(&realdn->d_count), 799 realdn->d_inode, ceph_vinop(realdn->d_inode)); 800 dput(dn); 801 dn = realdn; 802 } else { 803 BUG_ON(!ceph_dentry(dn)); 804 805 dout("dn %p attached to %p ino %llx.%llx\n", 806 dn, dn->d_inode, ceph_vinop(dn->d_inode)); 807 } 808 if ((!prehash || *prehash) && d_unhashed(dn)) 809 d_rehash(dn); 810 out: 811 return dn; 812 } 813 814 /* 815 * Incorporate results into the local cache. This is either just 816 * one inode, or a directory, dentry, and possibly linked-to inode (e.g., 817 * after a lookup). 818 * 819 * A reply may contain 820 * a directory inode along with a dentry. 821 * and/or a target inode 822 * 823 * Called with snap_rwsem (read). 824 */ 825 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, 826 struct ceph_mds_session *session) 827 { 828 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 829 struct inode *in = NULL; 830 struct ceph_mds_reply_inode *ininfo; 831 struct ceph_vino vino; 832 int i = 0; 833 int err = 0; 834 835 dout("fill_trace %p is_dentry %d is_target %d\n", req, 836 rinfo->head->is_dentry, rinfo->head->is_target); 837 838 #if 0 839 /* 840 * Debugging hook: 841 * 842 * If we resend completed ops to a recovering mds, we get no 843 * trace. Since that is very rare, pretend this is the case 844 * to ensure the 'no trace' handlers in the callers behave. 845 * 846 * Fill in inodes unconditionally to avoid breaking cap 847 * invariants. 848 */ 849 if (rinfo->head->op & CEPH_MDS_OP_WRITE) { 850 pr_info("fill_trace faking empty trace on %lld %s\n", 851 req->r_tid, ceph_mds_op_name(rinfo->head->op)); 852 if (rinfo->head->is_dentry) { 853 rinfo->head->is_dentry = 0; 854 err = fill_inode(req->r_locked_dir, 855 &rinfo->diri, rinfo->dirfrag, 856 session, req->r_request_started, -1); 857 } 858 if (rinfo->head->is_target) { 859 rinfo->head->is_target = 0; 860 ininfo = rinfo->targeti.in; 861 vino.ino = le64_to_cpu(ininfo->ino); 862 vino.snap = le64_to_cpu(ininfo->snapid); 863 in = ceph_get_inode(sb, vino); 864 err = fill_inode(in, &rinfo->targeti, NULL, 865 session, req->r_request_started, 866 req->r_fmode); 867 iput(in); 868 } 869 } 870 #endif 871 872 if (!rinfo->head->is_target && !rinfo->head->is_dentry) { 873 dout("fill_trace reply is empty!\n"); 874 if (rinfo->head->result == 0 && req->r_locked_dir) { 875 struct ceph_inode_info *ci = 876 ceph_inode(req->r_locked_dir); 877 dout(" clearing %p complete (empty trace)\n", 878 req->r_locked_dir); 879 ci->i_ceph_flags &= ~CEPH_I_COMPLETE; 880 ci->i_release_count++; 881 } 882 return 0; 883 } 884 885 if (rinfo->head->is_dentry) { 886 /* 887 * lookup link rename : null -> possibly existing inode 888 * mknod symlink mkdir : null -> new inode 889 * unlink : linked -> null 890 */ 891 struct inode *dir = req->r_locked_dir; 892 struct dentry *dn = req->r_dentry; 893 bool have_dir_cap, have_lease; 894 895 BUG_ON(!dn); 896 BUG_ON(!dir); 897 BUG_ON(dn->d_parent->d_inode != dir); 898 BUG_ON(ceph_ino(dir) != 899 le64_to_cpu(rinfo->diri.in->ino)); 900 BUG_ON(ceph_snap(dir) != 901 le64_to_cpu(rinfo->diri.in->snapid)); 902 903 err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, 904 session, req->r_request_started, -1, 905 &req->r_caps_reservation); 906 if (err < 0) 907 return err; 908 909 /* do we have a lease on the whole dir? */ 910 have_dir_cap = 911 (le32_to_cpu(rinfo->diri.in->cap.caps) & 912 CEPH_CAP_FILE_SHARED); 913 914 /* do we have a dn lease? */ 915 have_lease = have_dir_cap || 916 (le16_to_cpu(rinfo->dlease->mask) & 917 CEPH_LOCK_DN); 918 919 if (!have_lease) 920 dout("fill_trace no dentry lease or dir cap\n"); 921 922 /* rename? */ 923 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) { 924 dout(" src %p '%.*s' dst %p '%.*s'\n", 925 req->r_old_dentry, 926 req->r_old_dentry->d_name.len, 927 req->r_old_dentry->d_name.name, 928 dn, dn->d_name.len, dn->d_name.name); 929 dout("fill_trace doing d_move %p -> %p\n", 930 req->r_old_dentry, dn); 931 d_move(req->r_old_dentry, dn); 932 dout(" src %p '%.*s' dst %p '%.*s'\n", 933 req->r_old_dentry, 934 req->r_old_dentry->d_name.len, 935 req->r_old_dentry->d_name.name, 936 dn, dn->d_name.len, dn->d_name.name); 937 /* take overwritten dentry's readdir offset */ 938 ceph_dentry(req->r_old_dentry)->offset = 939 ceph_dentry(dn)->offset; 940 dn = req->r_old_dentry; /* use old_dentry */ 941 in = dn->d_inode; 942 } 943 944 /* null dentry? */ 945 if (!rinfo->head->is_target) { 946 dout("fill_trace null dentry\n"); 947 if (dn->d_inode) { 948 dout("d_delete %p\n", dn); 949 d_delete(dn); 950 } else { 951 dout("d_instantiate %p NULL\n", dn); 952 d_instantiate(dn, NULL); 953 if (have_lease && d_unhashed(dn)) 954 d_rehash(dn); 955 update_dentry_lease(dn, rinfo->dlease, 956 session, 957 req->r_request_started); 958 } 959 goto done; 960 } 961 962 /* attach proper inode */ 963 ininfo = rinfo->targeti.in; 964 vino.ino = le64_to_cpu(ininfo->ino); 965 vino.snap = le64_to_cpu(ininfo->snapid); 966 if (!dn->d_inode) { 967 in = ceph_get_inode(sb, vino); 968 if (IS_ERR(in)) { 969 pr_err("fill_trace bad get_inode " 970 "%llx.%llx\n", vino.ino, vino.snap); 971 err = PTR_ERR(in); 972 d_delete(dn); 973 goto done; 974 } 975 dn = splice_dentry(dn, in, &have_lease); 976 if (IS_ERR(dn)) { 977 err = PTR_ERR(dn); 978 goto done; 979 } 980 req->r_dentry = dn; /* may have spliced */ 981 igrab(in); 982 } else if (ceph_ino(in) == vino.ino && 983 ceph_snap(in) == vino.snap) { 984 igrab(in); 985 } else { 986 dout(" %p links to %p %llx.%llx, not %llx.%llx\n", 987 dn, in, ceph_ino(in), ceph_snap(in), 988 vino.ino, vino.snap); 989 have_lease = false; 990 in = NULL; 991 } 992 993 if (have_lease) 994 update_dentry_lease(dn, rinfo->dlease, session, 995 req->r_request_started); 996 dout(" final dn %p\n", dn); 997 i++; 998 } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || 999 req->r_op == CEPH_MDS_OP_MKSNAP) { 1000 struct dentry *dn = req->r_dentry; 1001 1002 /* fill out a snapdir LOOKUPSNAP dentry */ 1003 BUG_ON(!dn); 1004 BUG_ON(!req->r_locked_dir); 1005 BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR); 1006 ininfo = rinfo->targeti.in; 1007 vino.ino = le64_to_cpu(ininfo->ino); 1008 vino.snap = le64_to_cpu(ininfo->snapid); 1009 in = ceph_get_inode(sb, vino); 1010 if (IS_ERR(in)) { 1011 pr_err("fill_inode get_inode badness %llx.%llx\n", 1012 vino.ino, vino.snap); 1013 err = PTR_ERR(in); 1014 d_delete(dn); 1015 goto done; 1016 } 1017 dout(" linking snapped dir %p to dn %p\n", in, dn); 1018 dn = splice_dentry(dn, in, NULL); 1019 if (IS_ERR(dn)) { 1020 err = PTR_ERR(dn); 1021 goto done; 1022 } 1023 req->r_dentry = dn; /* may have spliced */ 1024 igrab(in); 1025 rinfo->head->is_dentry = 1; /* fool notrace handlers */ 1026 } 1027 1028 if (rinfo->head->is_target) { 1029 vino.ino = le64_to_cpu(rinfo->targeti.in->ino); 1030 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); 1031 1032 if (in == NULL || ceph_ino(in) != vino.ino || 1033 ceph_snap(in) != vino.snap) { 1034 in = ceph_get_inode(sb, vino); 1035 if (IS_ERR(in)) { 1036 err = PTR_ERR(in); 1037 goto done; 1038 } 1039 } 1040 req->r_target_inode = in; 1041 1042 err = fill_inode(in, 1043 &rinfo->targeti, NULL, 1044 session, req->r_request_started, 1045 (le32_to_cpu(rinfo->head->result) == 0) ? 1046 req->r_fmode : -1, 1047 &req->r_caps_reservation); 1048 if (err < 0) { 1049 pr_err("fill_inode badness %p %llx.%llx\n", 1050 in, ceph_vinop(in)); 1051 goto done; 1052 } 1053 } 1054 1055 done: 1056 dout("fill_trace done err=%d\n", err); 1057 return err; 1058 } 1059 1060 /* 1061 * Prepopulate our cache with readdir results, leases, etc. 1062 */ 1063 int ceph_readdir_prepopulate(struct ceph_mds_request *req, 1064 struct ceph_mds_session *session) 1065 { 1066 struct dentry *parent = req->r_dentry; 1067 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1068 struct qstr dname; 1069 struct dentry *dn; 1070 struct inode *in; 1071 int err = 0, i; 1072 struct inode *snapdir = NULL; 1073 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; 1074 u64 frag = le32_to_cpu(rhead->args.readdir.frag); 1075 struct ceph_dentry_info *di; 1076 1077 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { 1078 snapdir = ceph_get_snapdir(parent->d_inode); 1079 parent = d_find_alias(snapdir); 1080 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n", 1081 rinfo->dir_nr, parent); 1082 } else { 1083 dout("readdir_prepopulate %d items under dn %p\n", 1084 rinfo->dir_nr, parent); 1085 if (rinfo->dir_dir) 1086 ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir); 1087 } 1088 1089 for (i = 0; i < rinfo->dir_nr; i++) { 1090 struct ceph_vino vino; 1091 1092 dname.name = rinfo->dir_dname[i]; 1093 dname.len = rinfo->dir_dname_len[i]; 1094 dname.hash = full_name_hash(dname.name, dname.len); 1095 1096 vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino); 1097 vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid); 1098 1099 retry_lookup: 1100 dn = d_lookup(parent, &dname); 1101 dout("d_lookup on parent=%p name=%.*s got %p\n", 1102 parent, dname.len, dname.name, dn); 1103 1104 if (!dn) { 1105 dn = d_alloc(parent, &dname); 1106 dout("d_alloc %p '%.*s' = %p\n", parent, 1107 dname.len, dname.name, dn); 1108 if (dn == NULL) { 1109 dout("d_alloc badness\n"); 1110 err = -ENOMEM; 1111 goto out; 1112 } 1113 err = ceph_init_dentry(dn); 1114 if (err < 0) 1115 goto out; 1116 } else if (dn->d_inode && 1117 (ceph_ino(dn->d_inode) != vino.ino || 1118 ceph_snap(dn->d_inode) != vino.snap)) { 1119 dout(" dn %p points to wrong inode %p\n", 1120 dn, dn->d_inode); 1121 d_delete(dn); 1122 dput(dn); 1123 goto retry_lookup; 1124 } else { 1125 /* reorder parent's d_subdirs */ 1126 spin_lock(&dcache_lock); 1127 spin_lock(&dn->d_lock); 1128 list_move(&dn->d_u.d_child, &parent->d_subdirs); 1129 spin_unlock(&dn->d_lock); 1130 spin_unlock(&dcache_lock); 1131 } 1132 1133 di = dn->d_fsdata; 1134 di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset); 1135 1136 /* inode */ 1137 if (dn->d_inode) { 1138 in = dn->d_inode; 1139 } else { 1140 in = ceph_get_inode(parent->d_sb, vino); 1141 if (in == NULL) { 1142 dout("new_inode badness\n"); 1143 d_delete(dn); 1144 dput(dn); 1145 err = -ENOMEM; 1146 goto out; 1147 } 1148 dn = splice_dentry(dn, in, NULL); 1149 } 1150 1151 if (fill_inode(in, &rinfo->dir_in[i], NULL, session, 1152 req->r_request_started, -1, 1153 &req->r_caps_reservation) < 0) { 1154 pr_err("fill_inode badness on %p\n", in); 1155 dput(dn); 1156 continue; 1157 } 1158 update_dentry_lease(dn, rinfo->dir_dlease[i], 1159 req->r_session, req->r_request_started); 1160 dput(dn); 1161 } 1162 req->r_did_prepopulate = true; 1163 1164 out: 1165 if (snapdir) { 1166 iput(snapdir); 1167 dput(parent); 1168 } 1169 dout("readdir_prepopulate done\n"); 1170 return err; 1171 } 1172 1173 int ceph_inode_set_size(struct inode *inode, loff_t size) 1174 { 1175 struct ceph_inode_info *ci = ceph_inode(inode); 1176 int ret = 0; 1177 1178 spin_lock(&inode->i_lock); 1179 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size); 1180 inode->i_size = size; 1181 inode->i_blocks = (size + (1 << 9) - 1) >> 9; 1182 1183 /* tell the MDS if we are approaching max_size */ 1184 if ((size << 1) >= ci->i_max_size && 1185 (ci->i_reported_size << 1) < ci->i_max_size) 1186 ret = 1; 1187 1188 spin_unlock(&inode->i_lock); 1189 return ret; 1190 } 1191 1192 /* 1193 * Write back inode data in a worker thread. (This can't be done 1194 * in the message handler context.) 1195 */ 1196 void ceph_inode_writeback(struct work_struct *work) 1197 { 1198 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1199 i_wb_work); 1200 struct inode *inode = &ci->vfs_inode; 1201 1202 dout("writeback %p\n", inode); 1203 filemap_fdatawrite(&inode->i_data); 1204 iput(inode); 1205 } 1206 1207 /* 1208 * Invalidate inode pages in a worker thread. (This can't be done 1209 * in the message handler context.) 1210 */ 1211 static void ceph_inode_invalidate_pages(struct work_struct *work) 1212 { 1213 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1214 i_pg_inv_work); 1215 struct inode *inode = &ci->vfs_inode; 1216 u32 orig_gen; 1217 int check = 0; 1218 1219 spin_lock(&inode->i_lock); 1220 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1221 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1222 if (ci->i_rdcache_gen == 0 || 1223 ci->i_rdcache_revoking != ci->i_rdcache_gen) { 1224 BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen); 1225 /* nevermind! */ 1226 ci->i_rdcache_revoking = 0; 1227 spin_unlock(&inode->i_lock); 1228 goto out; 1229 } 1230 orig_gen = ci->i_rdcache_gen; 1231 spin_unlock(&inode->i_lock); 1232 1233 truncate_inode_pages(&inode->i_data, 0); 1234 1235 spin_lock(&inode->i_lock); 1236 if (orig_gen == ci->i_rdcache_gen) { 1237 dout("invalidate_pages %p gen %d successful\n", inode, 1238 ci->i_rdcache_gen); 1239 ci->i_rdcache_gen = 0; 1240 ci->i_rdcache_revoking = 0; 1241 check = 1; 1242 } else { 1243 dout("invalidate_pages %p gen %d raced, gen now %d\n", 1244 inode, orig_gen, ci->i_rdcache_gen); 1245 } 1246 spin_unlock(&inode->i_lock); 1247 1248 if (check) 1249 ceph_check_caps(ci, 0, NULL); 1250 out: 1251 iput(inode); 1252 } 1253 1254 1255 /* 1256 * called by trunc_wq; take i_mutex ourselves 1257 * 1258 * We also truncate in a separate thread as well. 1259 */ 1260 void ceph_vmtruncate_work(struct work_struct *work) 1261 { 1262 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1263 i_vmtruncate_work); 1264 struct inode *inode = &ci->vfs_inode; 1265 1266 dout("vmtruncate_work %p\n", inode); 1267 mutex_lock(&inode->i_mutex); 1268 __ceph_do_pending_vmtruncate(inode); 1269 mutex_unlock(&inode->i_mutex); 1270 iput(inode); 1271 } 1272 1273 /* 1274 * called with i_mutex held. 1275 * 1276 * Make sure any pending truncation is applied before doing anything 1277 * that may depend on it. 1278 */ 1279 void __ceph_do_pending_vmtruncate(struct inode *inode) 1280 { 1281 struct ceph_inode_info *ci = ceph_inode(inode); 1282 u64 to; 1283 int wrbuffer_refs, wake = 0; 1284 1285 retry: 1286 spin_lock(&inode->i_lock); 1287 if (ci->i_truncate_pending == 0) { 1288 dout("__do_pending_vmtruncate %p none pending\n", inode); 1289 spin_unlock(&inode->i_lock); 1290 return; 1291 } 1292 1293 /* 1294 * make sure any dirty snapped pages are flushed before we 1295 * possibly truncate them.. so write AND block! 1296 */ 1297 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { 1298 dout("__do_pending_vmtruncate %p flushing snaps first\n", 1299 inode); 1300 spin_unlock(&inode->i_lock); 1301 filemap_write_and_wait_range(&inode->i_data, 0, 1302 inode->i_sb->s_maxbytes); 1303 goto retry; 1304 } 1305 1306 to = ci->i_truncate_size; 1307 wrbuffer_refs = ci->i_wrbuffer_ref; 1308 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode, 1309 ci->i_truncate_pending, to); 1310 spin_unlock(&inode->i_lock); 1311 1312 truncate_inode_pages(inode->i_mapping, to); 1313 1314 spin_lock(&inode->i_lock); 1315 ci->i_truncate_pending--; 1316 if (ci->i_truncate_pending == 0) 1317 wake = 1; 1318 spin_unlock(&inode->i_lock); 1319 1320 if (wrbuffer_refs == 0) 1321 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 1322 if (wake) 1323 wake_up(&ci->i_cap_wq); 1324 } 1325 1326 1327 /* 1328 * symlinks 1329 */ 1330 static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd) 1331 { 1332 struct ceph_inode_info *ci = ceph_inode(dentry->d_inode); 1333 nd_set_link(nd, ci->i_symlink); 1334 return NULL; 1335 } 1336 1337 static const struct inode_operations ceph_symlink_iops = { 1338 .readlink = generic_readlink, 1339 .follow_link = ceph_sym_follow_link, 1340 }; 1341 1342 /* 1343 * setattr 1344 */ 1345 int ceph_setattr(struct dentry *dentry, struct iattr *attr) 1346 { 1347 struct inode *inode = dentry->d_inode; 1348 struct ceph_inode_info *ci = ceph_inode(inode); 1349 struct inode *parent_inode = dentry->d_parent->d_inode; 1350 const unsigned int ia_valid = attr->ia_valid; 1351 struct ceph_mds_request *req; 1352 struct ceph_mds_client *mdsc = &ceph_client(dentry->d_sb)->mdsc; 1353 int issued; 1354 int release = 0, dirtied = 0; 1355 int mask = 0; 1356 int err = 0; 1357 int queue_trunc = 0; 1358 1359 if (ceph_snap(inode) != CEPH_NOSNAP) 1360 return -EROFS; 1361 1362 __ceph_do_pending_vmtruncate(inode); 1363 1364 err = inode_change_ok(inode, attr); 1365 if (err != 0) 1366 return err; 1367 1368 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR, 1369 USE_AUTH_MDS); 1370 if (IS_ERR(req)) 1371 return PTR_ERR(req); 1372 1373 spin_lock(&inode->i_lock); 1374 issued = __ceph_caps_issued(ci, NULL); 1375 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); 1376 1377 if (ia_valid & ATTR_UID) { 1378 dout("setattr %p uid %d -> %d\n", inode, 1379 inode->i_uid, attr->ia_uid); 1380 if (issued & CEPH_CAP_AUTH_EXCL) { 1381 inode->i_uid = attr->ia_uid; 1382 dirtied |= CEPH_CAP_AUTH_EXCL; 1383 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1384 attr->ia_uid != inode->i_uid) { 1385 req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid); 1386 mask |= CEPH_SETATTR_UID; 1387 release |= CEPH_CAP_AUTH_SHARED; 1388 } 1389 } 1390 if (ia_valid & ATTR_GID) { 1391 dout("setattr %p gid %d -> %d\n", inode, 1392 inode->i_gid, attr->ia_gid); 1393 if (issued & CEPH_CAP_AUTH_EXCL) { 1394 inode->i_gid = attr->ia_gid; 1395 dirtied |= CEPH_CAP_AUTH_EXCL; 1396 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1397 attr->ia_gid != inode->i_gid) { 1398 req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid); 1399 mask |= CEPH_SETATTR_GID; 1400 release |= CEPH_CAP_AUTH_SHARED; 1401 } 1402 } 1403 if (ia_valid & ATTR_MODE) { 1404 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode, 1405 attr->ia_mode); 1406 if (issued & CEPH_CAP_AUTH_EXCL) { 1407 inode->i_mode = attr->ia_mode; 1408 dirtied |= CEPH_CAP_AUTH_EXCL; 1409 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1410 attr->ia_mode != inode->i_mode) { 1411 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode); 1412 mask |= CEPH_SETATTR_MODE; 1413 release |= CEPH_CAP_AUTH_SHARED; 1414 } 1415 } 1416 1417 if (ia_valid & ATTR_ATIME) { 1418 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode, 1419 inode->i_atime.tv_sec, inode->i_atime.tv_nsec, 1420 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec); 1421 if (issued & CEPH_CAP_FILE_EXCL) { 1422 ci->i_time_warp_seq++; 1423 inode->i_atime = attr->ia_atime; 1424 dirtied |= CEPH_CAP_FILE_EXCL; 1425 } else if ((issued & CEPH_CAP_FILE_WR) && 1426 timespec_compare(&inode->i_atime, 1427 &attr->ia_atime) < 0) { 1428 inode->i_atime = attr->ia_atime; 1429 dirtied |= CEPH_CAP_FILE_WR; 1430 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1431 !timespec_equal(&inode->i_atime, &attr->ia_atime)) { 1432 ceph_encode_timespec(&req->r_args.setattr.atime, 1433 &attr->ia_atime); 1434 mask |= CEPH_SETATTR_ATIME; 1435 release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | 1436 CEPH_CAP_FILE_WR; 1437 } 1438 } 1439 if (ia_valid & ATTR_MTIME) { 1440 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode, 1441 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 1442 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec); 1443 if (issued & CEPH_CAP_FILE_EXCL) { 1444 ci->i_time_warp_seq++; 1445 inode->i_mtime = attr->ia_mtime; 1446 dirtied |= CEPH_CAP_FILE_EXCL; 1447 } else if ((issued & CEPH_CAP_FILE_WR) && 1448 timespec_compare(&inode->i_mtime, 1449 &attr->ia_mtime) < 0) { 1450 inode->i_mtime = attr->ia_mtime; 1451 dirtied |= CEPH_CAP_FILE_WR; 1452 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1453 !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) { 1454 ceph_encode_timespec(&req->r_args.setattr.mtime, 1455 &attr->ia_mtime); 1456 mask |= CEPH_SETATTR_MTIME; 1457 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 1458 CEPH_CAP_FILE_WR; 1459 } 1460 } 1461 if (ia_valid & ATTR_SIZE) { 1462 dout("setattr %p size %lld -> %lld\n", inode, 1463 inode->i_size, attr->ia_size); 1464 if (attr->ia_size > inode->i_sb->s_maxbytes) { 1465 err = -EINVAL; 1466 goto out; 1467 } 1468 if ((issued & CEPH_CAP_FILE_EXCL) && 1469 attr->ia_size > inode->i_size) { 1470 inode->i_size = attr->ia_size; 1471 if (attr->ia_size < inode->i_size) { 1472 ci->i_truncate_size = attr->ia_size; 1473 ci->i_truncate_pending++; 1474 queue_trunc = 1; 1475 } 1476 inode->i_blocks = 1477 (attr->ia_size + (1 << 9) - 1) >> 9; 1478 inode->i_ctime = attr->ia_ctime; 1479 ci->i_reported_size = attr->ia_size; 1480 dirtied |= CEPH_CAP_FILE_EXCL; 1481 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1482 attr->ia_size != inode->i_size) { 1483 req->r_args.setattr.size = cpu_to_le64(attr->ia_size); 1484 req->r_args.setattr.old_size = 1485 cpu_to_le64(inode->i_size); 1486 mask |= CEPH_SETATTR_SIZE; 1487 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 1488 CEPH_CAP_FILE_WR; 1489 } 1490 } 1491 1492 /* these do nothing */ 1493 if (ia_valid & ATTR_CTIME) { 1494 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME| 1495 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0; 1496 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode, 1497 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 1498 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 1499 only ? "ctime only" : "ignored"); 1500 inode->i_ctime = attr->ia_ctime; 1501 if (only) { 1502 /* 1503 * if kernel wants to dirty ctime but nothing else, 1504 * we need to choose a cap to dirty under, or do 1505 * a almost-no-op setattr 1506 */ 1507 if (issued & CEPH_CAP_AUTH_EXCL) 1508 dirtied |= CEPH_CAP_AUTH_EXCL; 1509 else if (issued & CEPH_CAP_FILE_EXCL) 1510 dirtied |= CEPH_CAP_FILE_EXCL; 1511 else if (issued & CEPH_CAP_XATTR_EXCL) 1512 dirtied |= CEPH_CAP_XATTR_EXCL; 1513 else 1514 mask |= CEPH_SETATTR_CTIME; 1515 } 1516 } 1517 if (ia_valid & ATTR_FILE) 1518 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 1519 1520 if (dirtied) { 1521 __ceph_mark_dirty_caps(ci, dirtied); 1522 inode->i_ctime = CURRENT_TIME; 1523 } 1524 1525 release &= issued; 1526 spin_unlock(&inode->i_lock); 1527 1528 if (queue_trunc) 1529 __ceph_do_pending_vmtruncate(inode); 1530 1531 if (mask) { 1532 req->r_inode = igrab(inode); 1533 req->r_inode_drop = release; 1534 req->r_args.setattr.mask = cpu_to_le32(mask); 1535 req->r_num_caps = 1; 1536 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 1537 } 1538 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 1539 ceph_cap_string(dirtied), mask); 1540 1541 ceph_mdsc_put_request(req); 1542 __ceph_do_pending_vmtruncate(inode); 1543 return err; 1544 out: 1545 spin_unlock(&inode->i_lock); 1546 ceph_mdsc_put_request(req); 1547 return err; 1548 } 1549 1550 /* 1551 * Verify that we have a lease on the given mask. If not, 1552 * do a getattr against an mds. 1553 */ 1554 int ceph_do_getattr(struct inode *inode, int mask) 1555 { 1556 struct ceph_client *client = ceph_sb_to_client(inode->i_sb); 1557 struct ceph_mds_client *mdsc = &client->mdsc; 1558 struct ceph_mds_request *req; 1559 int err; 1560 1561 if (ceph_snap(inode) == CEPH_SNAPDIR) { 1562 dout("do_getattr inode %p SNAPDIR\n", inode); 1563 return 0; 1564 } 1565 1566 dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask)); 1567 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 1568 return 0; 1569 1570 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1571 if (IS_ERR(req)) 1572 return PTR_ERR(req); 1573 req->r_inode = igrab(inode); 1574 req->r_num_caps = 1; 1575 req->r_args.getattr.mask = cpu_to_le32(mask); 1576 err = ceph_mdsc_do_request(mdsc, NULL, req); 1577 ceph_mdsc_put_request(req); 1578 dout("do_getattr result=%d\n", err); 1579 return err; 1580 } 1581 1582 1583 /* 1584 * Check inode permissions. We verify we have a valid value for 1585 * the AUTH cap, then call the generic handler. 1586 */ 1587 int ceph_permission(struct inode *inode, int mask) 1588 { 1589 int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1590 1591 if (!err) 1592 err = generic_permission(inode, mask, NULL); 1593 return err; 1594 } 1595 1596 /* 1597 * Get all attributes. Hopefully somedata we'll have a statlite() 1598 * and can limit the fields we require to be accurate. 1599 */ 1600 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 1601 struct kstat *stat) 1602 { 1603 struct inode *inode = dentry->d_inode; 1604 struct ceph_inode_info *ci = ceph_inode(inode); 1605 int err; 1606 1607 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL); 1608 if (!err) { 1609 generic_fillattr(inode, stat); 1610 stat->ino = inode->i_ino; 1611 if (ceph_snap(inode) != CEPH_NOSNAP) 1612 stat->dev = ceph_snap(inode); 1613 else 1614 stat->dev = 0; 1615 if (S_ISDIR(inode->i_mode)) { 1616 stat->size = ci->i_rbytes; 1617 stat->blocks = 0; 1618 stat->blksize = 65536; 1619 } 1620 } 1621 return err; 1622 } 1623