#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
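
/*
 * A worked example of the frag encoding (informal; see ceph_frag.h
 * for the exact definitions): a frag is a (bits, value) pair packed
 * into a u32 by ceph_frag_make(b, v), covering every hash value whose
 * top b bits equal v.  ceph_frag_make(0, 0) is the whole hash space;
 * if the MDS splits it by 1, the children ceph_frag_make_child(f, 1, 0)
 * and ceph_frag_make_child(f, 1, 1) cover the lower and upper halves.
 * Only split points (and replicated leaves) are stored in i_fragtree;
 * all other leaves are implied.
 */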

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
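
/*
 * Hypothetical usage sketch for ceph_choose_frag(): a caller that
 * needs to know which dirfrag covers a given dentry name hash (for
 * example, to pick the mds serving that fragment) might do
 *
 *	struct ceph_inode_frag frag;
 *	int found;
 *	u32 fg = ceph_choose_frag(ci, name_hash, &frag, &found);
 *
 * and, if found is nonzero, consult frag.mds and frag.dist[] for the
 * delegation info.  (name_hash here is an assumed, caller-computed
 * hash of the dentry name.)
 */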

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic_set(&ci->i_release_count, 1);
	atomic_set(&ci->i_complete_count, 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * Positive dentry and corresponding inode are always accompanied
	 * in MDS reply. So no need to keep inode in the cache after
	 * dropping all its aliases.
	 */
	return 1;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}
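
/*
 * Worked example of the ordering rules above (illustrative): if we
 * think truncate_seq is 3 and the MDS reports 4, the file was
 * truncated behind our back, the MDS size wins, and we may queue an
 * async vmtruncate; if both sides say 3, size only ever grows, so the
 * larger of the two values is correct.  ceph_seq_cmp() is used rather
 * than a plain '<' so that sequence-number wraparound compares sanely.
 */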

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
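
/*
 * Illustration of the time_warp_seq handling above: mtime normally
 * only moves forward, so with equal seqs it is safe to take the max
 * of the local and MDS values.  A utimes() call (which may set times
 * *backwards*) bumps time_warp_seq; a larger seq from the MDS means
 * "take these values verbatim", while a larger local seq means our
 * own utimes() result is newer than what the MDS sent.
 */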

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued = 0, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_inode_frag *frag;
	struct rb_node *rb_node;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != inode->i_size))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    !__ceph_dir_is_complete(ci)) {
		dout(" marking %p complete (empty)\n", inode);
		__ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
		ci->i_max_offset = 2;
	}
no_change:
	spin_unlock(&ci->i_ceph_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		rb_erase(&frag->node, &ci->i_fragtree);
		kfree(frag);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&ci->i_ceph_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}
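
/*
 * Lease ttl arithmetic above, by example: with HZ == 1000 and a
 * 30000ms lease granted at from_time == J, ttl is J + 30000 jiffies
 * and half_ttl is J + 15000; di->lease_renew_after is set to the
 * halfway point so renewal is attempted once half the lease has
 * elapsed.  (The MDS carries the duration in ms; it is converted to
 * jiffies here.)
 */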

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 *
 * Always called under directory's i_mutex.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dir->d_inode;
	struct ceph_inode_info *ci;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	ci = ceph_inode(inode);
	di = ceph_dentry(dn);

	spin_lock(&ci->i_ceph_lock);
	if (!__ceph_dir_is_complete(ci)) {
		spin_unlock(&ci->i_ceph_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&ci->i_ceph_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}
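
/*
 * For reference, the three d_materialise_unique() outcomes handled
 * above: an ERR_PTR on failure; a different dentry when an existing
 * alias of the inode was found (we dput() ours and continue with the
 * alias); or NULL, meaning our own dentry was bound to the inode in
 * place.
 */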

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				return err;
		} else {
			WARN_ON_ONCE(1);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when dir is
			 * complete.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (!dn->d_inode) {
			ihold(in);
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (dn->d_inode && dn->d_inode != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, dn->d_inode, ceph_vinop(dn->d_inode),
			     ceph_vinop(in));
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ihold(in);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}
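
/*
 * Readdir offset convention assumed below: offsets 0 and 1 in the
 * leftmost frag are reserved for the "." and ".." entries, so real
 * entries there start at offset 2 (r_readdir_offset = 2), while any
 * other frag starts at offset 0; ceph_make_fpos() then packs the
 * (frag, offset) pair into the single f_pos value seen by readdir.
 */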

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
			continue;
		}
	}

	return err;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	struct ceph_dentry_info *di;
	u64 r_readdir_offset = req->r_readdir_offset;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (ceph_frag_is_leftmost(frag))
			r_readdir_offset = 2;
		else
			r_readdir_offset = 0;
	}

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (!dn->d_inode)
				iput(in);
			d_drop(dn);
			goto next_item;
		}

		if (!dn->d_inode) {
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				dn = NULL;
				goto next_item;
			}
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);

		update_dentry_lease(dn, rinfo->dir_dlease[i],
				    req->r_session,
				    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	if (err == 0)
		req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
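
/*
 * Example of the max_size check above (illustrative): with
 * i_max_size == 4 MB, growing the file past 2 MB makes
 * (size << 1) >= max_size true, so we return 1 and the caller reports
 * the new size to the MDS to request more room, but only if the
 * previously reported size had not already crossed the halfway mark
 * (this avoids sending a report for every single write).
 */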

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);
	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}


/*
 * called by trunc_wq;
 *
 * We also truncate in a separate thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
1558 */ 1559 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { 1560 dout("__do_pending_vmtruncate %p flushing snaps first\n", 1561 inode); 1562 spin_unlock(&ci->i_ceph_lock); 1563 filemap_write_and_wait_range(&inode->i_data, 0, 1564 inode->i_sb->s_maxbytes); 1565 goto retry; 1566 } 1567 1568 /* there should be no reader or writer */ 1569 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref); 1570 1571 to = ci->i_truncate_size; 1572 wrbuffer_refs = ci->i_wrbuffer_ref; 1573 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode, 1574 ci->i_truncate_pending, to); 1575 spin_unlock(&ci->i_ceph_lock); 1576 1577 truncate_inode_pages(inode->i_mapping, to); 1578 1579 spin_lock(&ci->i_ceph_lock); 1580 if (to == ci->i_truncate_size) { 1581 ci->i_truncate_pending = 0; 1582 finish = 1; 1583 } 1584 spin_unlock(&ci->i_ceph_lock); 1585 if (!finish) 1586 goto retry; 1587 1588 mutex_unlock(&ci->i_truncate_mutex); 1589 1590 if (wrbuffer_refs == 0) 1591 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 1592 1593 wake_up_all(&ci->i_cap_wq); 1594 } 1595 1596 /* 1597 * symlinks 1598 */ 1599 static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd) 1600 { 1601 struct ceph_inode_info *ci = ceph_inode(dentry->d_inode); 1602 nd_set_link(nd, ci->i_symlink); 1603 return NULL; 1604 } 1605 1606 static const struct inode_operations ceph_symlink_iops = { 1607 .readlink = generic_readlink, 1608 .follow_link = ceph_sym_follow_link, 1609 .setattr = ceph_setattr, 1610 .getattr = ceph_getattr, 1611 .setxattr = ceph_setxattr, 1612 .getxattr = ceph_getxattr, 1613 .listxattr = ceph_listxattr, 1614 .removexattr = ceph_removexattr, 1615 }; 1616 1617 /* 1618 * setattr 1619 */ 1620 int ceph_setattr(struct dentry *dentry, struct iattr *attr) 1621 { 1622 struct inode *inode = dentry->d_inode; 1623 struct ceph_inode_info *ci = ceph_inode(inode); 1624 struct inode *parent_inode; 1625 const unsigned int ia_valid = attr->ia_valid; 1626 struct ceph_mds_request *req; 1627 struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc; 1628 int issued; 1629 int release = 0, dirtied = 0; 1630 int mask = 0; 1631 int err = 0; 1632 int inode_dirty_flags = 0; 1633 1634 if (ceph_snap(inode) != CEPH_NOSNAP) 1635 return -EROFS; 1636 1637 err = inode_change_ok(inode, attr); 1638 if (err != 0) 1639 return err; 1640 1641 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR, 1642 USE_AUTH_MDS); 1643 if (IS_ERR(req)) 1644 return PTR_ERR(req); 1645 1646 spin_lock(&ci->i_ceph_lock); 1647 issued = __ceph_caps_issued(ci, NULL); 1648 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); 1649 1650 if (ia_valid & ATTR_UID) { 1651 dout("setattr %p uid %d -> %d\n", inode, 1652 from_kuid(&init_user_ns, inode->i_uid), 1653 from_kuid(&init_user_ns, attr->ia_uid)); 1654 if (issued & CEPH_CAP_AUTH_EXCL) { 1655 inode->i_uid = attr->ia_uid; 1656 dirtied |= CEPH_CAP_AUTH_EXCL; 1657 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1658 !uid_eq(attr->ia_uid, inode->i_uid)) { 1659 req->r_args.setattr.uid = cpu_to_le32( 1660 from_kuid(&init_user_ns, attr->ia_uid)); 1661 mask |= CEPH_SETATTR_UID; 1662 release |= CEPH_CAP_AUTH_SHARED; 1663 } 1664 } 1665 if (ia_valid & ATTR_GID) { 1666 dout("setattr %p gid %d -> %d\n", inode, 1667 from_kgid(&init_user_ns, inode->i_gid), 1668 from_kgid(&init_user_ns, attr->ia_gid)); 1669 if (issued & CEPH_CAP_AUTH_EXCL) { 1670 inode->i_gid = attr->ia_gid; 1671 dirtied |= CEPH_CAP_AUTH_EXCL; 1672 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1673 !gid_eq(attr->ia_gid, inode->i_gid)) { 1674 
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
"ctime only" : "ignored"); 1772 inode->i_ctime = attr->ia_ctime; 1773 if (only) { 1774 /* 1775 * if kernel wants to dirty ctime but nothing else, 1776 * we need to choose a cap to dirty under, or do 1777 * a almost-no-op setattr 1778 */ 1779 if (issued & CEPH_CAP_AUTH_EXCL) 1780 dirtied |= CEPH_CAP_AUTH_EXCL; 1781 else if (issued & CEPH_CAP_FILE_EXCL) 1782 dirtied |= CEPH_CAP_FILE_EXCL; 1783 else if (issued & CEPH_CAP_XATTR_EXCL) 1784 dirtied |= CEPH_CAP_XATTR_EXCL; 1785 else 1786 mask |= CEPH_SETATTR_CTIME; 1787 } 1788 } 1789 if (ia_valid & ATTR_FILE) 1790 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 1791 1792 if (dirtied) { 1793 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied); 1794 inode->i_ctime = CURRENT_TIME; 1795 } 1796 1797 release &= issued; 1798 spin_unlock(&ci->i_ceph_lock); 1799 1800 if (inode_dirty_flags) 1801 __mark_inode_dirty(inode, inode_dirty_flags); 1802 1803 if (mask) { 1804 req->r_inode = inode; 1805 ihold(inode); 1806 req->r_inode_drop = release; 1807 req->r_args.setattr.mask = cpu_to_le32(mask); 1808 req->r_num_caps = 1; 1809 parent_inode = ceph_get_dentry_parent_inode(dentry); 1810 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 1811 iput(parent_inode); 1812 } 1813 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 1814 ceph_cap_string(dirtied), mask); 1815 1816 ceph_mdsc_put_request(req); 1817 if (mask & CEPH_SETATTR_SIZE) 1818 __ceph_do_pending_vmtruncate(inode); 1819 return err; 1820 out: 1821 spin_unlock(&ci->i_ceph_lock); 1822 ceph_mdsc_put_request(req); 1823 return err; 1824 } 1825 1826 /* 1827 * Verify that we have a lease on the given mask. If not, 1828 * do a getattr against an mds. 1829 */ 1830 int ceph_do_getattr(struct inode *inode, int mask) 1831 { 1832 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 1833 struct ceph_mds_client *mdsc = fsc->mdsc; 1834 struct ceph_mds_request *req; 1835 int err; 1836 1837 if (ceph_snap(inode) == CEPH_SNAPDIR) { 1838 dout("do_getattr inode %p SNAPDIR\n", inode); 1839 return 0; 1840 } 1841 1842 dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode); 1843 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 1844 return 0; 1845 1846 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1847 if (IS_ERR(req)) 1848 return PTR_ERR(req); 1849 req->r_inode = inode; 1850 ihold(inode); 1851 req->r_num_caps = 1; 1852 req->r_args.getattr.mask = cpu_to_le32(mask); 1853 err = ceph_mdsc_do_request(mdsc, NULL, req); 1854 ceph_mdsc_put_request(req); 1855 dout("do_getattr result=%d\n", err); 1856 return err; 1857 } 1858 1859 1860 /* 1861 * Check inode permissions. We verify we have a valid value for 1862 * the AUTH cap, then call the generic handler. 1863 */ 1864 int ceph_permission(struct inode *inode, int mask) 1865 { 1866 int err; 1867 1868 if (mask & MAY_NOT_BLOCK) 1869 return -ECHILD; 1870 1871 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1872 1873 if (!err) 1874 err = generic_permission(inode, mask); 1875 return err; 1876 } 1877 1878 /* 1879 * Get all attributes. Hopefully somedata we'll have a statlite() 1880 * and can limit the fields we require to be accurate. 

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}