#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

struct inode *ceph_lookup_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);
	inode = ilookup5_nowait(sb, t, ceph_ino_compare, &vino);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
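/*
 * Illustrative sketch (not from the original source): a frag names a
 * range of the 32-bit dentry hash space.  The root frag,
 * ceph_frag_make(0, 0), covers everything; a record below with
 * split_by == 1 divides its range between the two children
 * ceph_frag_make_child(f, 1, 0) and ceph_frag_make_child(f, 1, 1), and
 * so on for larger split_by.  Only splits need to be stored in
 * i_fragtree; a leaf is implied by the absence of a matching record.
 */
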
/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
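#if 0
/*
 * Example use of ceph_choose_frag() (an illustrative sketch, not part
 * of the original file): given a name hash, find the covering frag and
 * report which mds we were delegated to for it, or -1 if we have no
 * delegation info cached.
 */
static int example_frag_mds(struct ceph_inode_info *ci, u32 hash)
{
	struct ceph_inode_frag frag;
	int found;

	ceph_choose_frag(ci, hash, &frag, &found);
	return found ? frag.mds : -1;
}
#endif
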
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragments in the tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
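/*
 * For example (illustrative): a dirfrag with ndist == 2 leaves
 * frag->mds pointing at the auth mds and frag->dist[] naming two
 * replicas that requests may be directed to, while ndist == 0 above
 * means any delegation we had cached for that frag is stale and is
 * removed or cleared.
 */
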
/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic_set(&ci->i_release_count, 1);
	atomic_set(&ci->i_complete_count, 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1 << 9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}

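/*
 * Worked example (a sketch, not from the original source): if the MDS
 * reports truncate_seq 5 and size 0 while we hold seq 4 and size 4096,
 * the newer seq wins: i_size becomes 0, and if the file is open,
 * wanted, or mmapped, a truncation of the page cache is queued.  If
 * the seqs are equal, size may only grow, since with no intervening
 * truncate() the file can only have been extended.
 */
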
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued = 0, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * The provided version will be odd if the inode value is
	 * projected (unstable), and even if it is stable.  Skip the
	 * update if we have newer stable info (ours >= theirs, e.g.
	 * due to racing mds replies), unless we are getting projected
	 * (unstable) info (in which case the version is odd, and we
	 * want ours > theirs).
	 *   us   them
	 *   2    2    skip
	 *   3    2    skip
	 *   3    3    update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

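	/*
	 * Note: xattr_blob was preallocated above, before i_ceph_lock
	 * was taken, so the xattr section below can install it without
	 * sleeping under the spinlock; if the blob is not consumed
	 * here, it is released at out:.
	 */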
	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != inode->i_size))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    !__ceph_dir_is_complete(ci)) {
		dout(" marking %p complete (empty)\n", inode);
		__ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
		ci->i_max_offset = 2;
	}
no_change:
	spin_unlock(&ci->i_ceph_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&ci->i_ceph_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned long duration = le32_to_cpu(lease->duration_ms);
	unsigned long ttl = from_time + (duration * HZ) / 1000;
	unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_rdcache_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

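/*
 * Example of the lease math above (illustrative): with HZ == 1000 and
 * duration_ms == 30000, ttl is from_time + 30s worth of jiffies and
 * half_ttl is from_time + 15s, so the lease stays valid until ttl and
 * becomes eligible for renewal once half_ttl has passed.
 */
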
/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 *
 * Always called under directory's i_mutex.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dir->d_inode;
	struct ceph_inode_info *ci;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	ci = ceph_inode(inode);
	di = ceph_dentry(dn);

	spin_lock(&ci->i_ceph_lock);
	if (!__ceph_dir_is_complete(ci)) {
		spin_unlock(&ci->i_ceph_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&ci->i_ceph_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}

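/*
 * Note for callers (describing the helper above): d_materialise_unique()
 * may hand back a different dentry than the one passed in (a found
 * alias), so callers must continue with splice_dentry()'s return value
 * and refresh any cached pointer (e.g. req->r_dentry), never reuse the
 * original dn.
 */
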
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode,
					 &req->r_caps_reservation);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				return err;
		} else {
			WARN_ON_ONCE(1);
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when dir is
			 * complete.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = dn->d_inode;
		if (!in) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_drop(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			ihold(in);
		} else if (ceph_ino(in) == vino.ino &&
			   ceph_snap(in) == vino.snap) {
			ihold(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, in, ceph_ino(in), ceph_snap(in),
			     vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
		ihold(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
			continue;
		}
	}

	return err;
}

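/*
 * Note: even when a request was aborted we still run the returned
 * inodes through fill_inode() above; quietly dropping the reply could
 * leave our cap accounting inconsistent with what the MDS believes it
 * issued (see the cap invariants note in ceph_fill_trace()).
 */
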
badness\n"); 1327 d_drop(dn); 1328 dput(dn); 1329 err = PTR_ERR(in); 1330 goto out; 1331 } 1332 dn = splice_dentry(dn, in, NULL, false); 1333 if (IS_ERR(dn)) 1334 dn = NULL; 1335 } 1336 1337 if (fill_inode(in, &rinfo->dir_in[i], NULL, session, 1338 req->r_request_started, -1, 1339 &req->r_caps_reservation) < 0) { 1340 pr_err("fill_inode badness on %p\n", in); 1341 goto next_item; 1342 } 1343 if (dn) 1344 update_dentry_lease(dn, rinfo->dir_dlease[i], 1345 req->r_session, 1346 req->r_request_started); 1347 next_item: 1348 if (dn) 1349 dput(dn); 1350 } 1351 req->r_did_prepopulate = true; 1352 1353 out: 1354 if (snapdir) { 1355 iput(snapdir); 1356 dput(parent); 1357 } 1358 dout("readdir_prepopulate done\n"); 1359 return err; 1360 } 1361 1362 int ceph_inode_set_size(struct inode *inode, loff_t size) 1363 { 1364 struct ceph_inode_info *ci = ceph_inode(inode); 1365 int ret = 0; 1366 1367 spin_lock(&ci->i_ceph_lock); 1368 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size); 1369 inode->i_size = size; 1370 inode->i_blocks = (size + (1 << 9) - 1) >> 9; 1371 1372 /* tell the MDS if we are approaching max_size */ 1373 if ((size << 1) >= ci->i_max_size && 1374 (ci->i_reported_size << 1) < ci->i_max_size) 1375 ret = 1; 1376 1377 spin_unlock(&ci->i_ceph_lock); 1378 return ret; 1379 } 1380 1381 /* 1382 * Write back inode data in a worker thread. (This can't be done 1383 * in the message handler context.) 1384 */ 1385 void ceph_queue_writeback(struct inode *inode) 1386 { 1387 ihold(inode); 1388 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1389 &ceph_inode(inode)->i_wb_work)) { 1390 dout("ceph_queue_writeback %p\n", inode); 1391 } else { 1392 dout("ceph_queue_writeback %p failed\n", inode); 1393 iput(inode); 1394 } 1395 } 1396 1397 static void ceph_writeback_work(struct work_struct *work) 1398 { 1399 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1400 i_wb_work); 1401 struct inode *inode = &ci->vfs_inode; 1402 1403 dout("writeback %p\n", inode); 1404 filemap_fdatawrite(&inode->i_data); 1405 iput(inode); 1406 } 1407 1408 /* 1409 * queue an async invalidation 1410 */ 1411 void ceph_queue_invalidate(struct inode *inode) 1412 { 1413 ihold(inode); 1414 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1415 &ceph_inode(inode)->i_pg_inv_work)) { 1416 dout("ceph_queue_invalidate %p\n", inode); 1417 } else { 1418 dout("ceph_queue_invalidate %p failed\n", inode); 1419 iput(inode); 1420 } 1421 } 1422 1423 /* 1424 * Invalidate inode pages in a worker thread. (This can't be done 1425 * in the message handler context.) 1426 */ 1427 static void ceph_invalidate_work(struct work_struct *work) 1428 { 1429 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1430 i_pg_inv_work); 1431 struct inode *inode = &ci->vfs_inode; 1432 u32 orig_gen; 1433 int check = 0; 1434 1435 mutex_lock(&ci->i_truncate_mutex); 1436 spin_lock(&ci->i_ceph_lock); 1437 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1438 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1439 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 1440 /* nevermind! 
/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);
	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}


/*
 * called by trunc_wq;
 *
 * We also truncate in a separate thread.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);
	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

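/*
 * The three queue helpers above share one pattern: take an inode
 * reference before queueing, and drop it immediately when queue_work()
 * returns false (work already pending), so exactly one queued instance
 * owns the reference and the matching iput() happens in the worker.
 */
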
/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}


/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};

/*
 * setattr
 */
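/*
 * Rough shape of the logic below (a summary, not normative): when we
 * hold the relevant EXCL cap, an attribute is updated locally and the
 * cap marked dirty; otherwise the new value is encoded into an MDS
 * setattr request and any SHARED caps covering the now-stale value are
 * released with it.
 */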
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		parent_inode = ceph_get_dentry_parent_inode(dentry);
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
		iput(parent_inode);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&ci->i_ceph_lock);
	ceph_mdsc_put_request(req);
	return err;
}

"ctime only" : "ignored"); 1745 inode->i_ctime = attr->ia_ctime; 1746 if (only) { 1747 /* 1748 * if kernel wants to dirty ctime but nothing else, 1749 * we need to choose a cap to dirty under, or do 1750 * a almost-no-op setattr 1751 */ 1752 if (issued & CEPH_CAP_AUTH_EXCL) 1753 dirtied |= CEPH_CAP_AUTH_EXCL; 1754 else if (issued & CEPH_CAP_FILE_EXCL) 1755 dirtied |= CEPH_CAP_FILE_EXCL; 1756 else if (issued & CEPH_CAP_XATTR_EXCL) 1757 dirtied |= CEPH_CAP_XATTR_EXCL; 1758 else 1759 mask |= CEPH_SETATTR_CTIME; 1760 } 1761 } 1762 if (ia_valid & ATTR_FILE) 1763 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 1764 1765 if (dirtied) { 1766 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied); 1767 inode->i_ctime = CURRENT_TIME; 1768 } 1769 1770 release &= issued; 1771 spin_unlock(&ci->i_ceph_lock); 1772 1773 if (inode_dirty_flags) 1774 __mark_inode_dirty(inode, inode_dirty_flags); 1775 1776 if (mask) { 1777 req->r_inode = inode; 1778 ihold(inode); 1779 req->r_inode_drop = release; 1780 req->r_args.setattr.mask = cpu_to_le32(mask); 1781 req->r_num_caps = 1; 1782 parent_inode = ceph_get_dentry_parent_inode(dentry); 1783 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 1784 iput(parent_inode); 1785 } 1786 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 1787 ceph_cap_string(dirtied), mask); 1788 1789 ceph_mdsc_put_request(req); 1790 if (mask & CEPH_SETATTR_SIZE) 1791 __ceph_do_pending_vmtruncate(inode); 1792 return err; 1793 out: 1794 spin_unlock(&ci->i_ceph_lock); 1795 ceph_mdsc_put_request(req); 1796 return err; 1797 } 1798 1799 /* 1800 * Verify that we have a lease on the given mask. If not, 1801 * do a getattr against an mds. 1802 */ 1803 int ceph_do_getattr(struct inode *inode, int mask) 1804 { 1805 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 1806 struct ceph_mds_client *mdsc = fsc->mdsc; 1807 struct ceph_mds_request *req; 1808 int err; 1809 1810 if (ceph_snap(inode) == CEPH_SNAPDIR) { 1811 dout("do_getattr inode %p SNAPDIR\n", inode); 1812 return 0; 1813 } 1814 1815 dout("do_getattr inode %p mask %s mode 0%o\n", inode, ceph_cap_string(mask), inode->i_mode); 1816 if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 1817 return 0; 1818 1819 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1820 if (IS_ERR(req)) 1821 return PTR_ERR(req); 1822 req->r_inode = inode; 1823 ihold(inode); 1824 req->r_num_caps = 1; 1825 req->r_args.getattr.mask = cpu_to_le32(mask); 1826 err = ceph_mdsc_do_request(mdsc, NULL, req); 1827 ceph_mdsc_put_request(req); 1828 dout("do_getattr result=%d\n", err); 1829 return err; 1830 } 1831 1832 1833 /* 1834 * Check inode permissions. We verify we have a valid value for 1835 * the AUTH cap, then call the generic handler. 1836 */ 1837 int ceph_permission(struct inode *inode, int mask) 1838 { 1839 int err; 1840 1841 if (mask & MAY_NOT_BLOCK) 1842 return -ECHILD; 1843 1844 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED); 1845 1846 if (!err) 1847 err = generic_permission(inode, mask); 1848 return err; 1849 } 1850 1851 /* 1852 * Get all attributes. Hopefully somedata we'll have a statlite() 1853 * and can limit the fields we require to be accurate. 
1854 */ 1855 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 1856 struct kstat *stat) 1857 { 1858 struct inode *inode = dentry->d_inode; 1859 struct ceph_inode_info *ci = ceph_inode(inode); 1860 int err; 1861 1862 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL); 1863 if (!err) { 1864 generic_fillattr(inode, stat); 1865 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino); 1866 if (ceph_snap(inode) != CEPH_NOSNAP) 1867 stat->dev = ceph_snap(inode); 1868 else 1869 stat->dev = 0; 1870 if (S_ISDIR(inode->i_mode)) { 1871 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), 1872 RBYTES)) 1873 stat->size = ci->i_rbytes; 1874 else 1875 stat->size = ci->i_files + ci->i_subdirs; 1876 stat->blocks = 0; 1877 stat->blksize = 65536; 1878 } 1879 } 1880 return err; 1881 } 1882