Lines matching refs:ci (cross-reference of the struct ceph_inode_info *ci identifier in the Linux Ceph client's capability code, fs/ceph/caps.c)
50 struct ceph_inode_info *ci,
435 struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds) in __get_cap_for_mds() argument
438 struct rb_node *n = ci->i_caps.rb_node; in __get_cap_for_mds()
452 struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds) in ceph_get_cap_for_mds() argument
456 spin_lock(&ci->i_ceph_lock); in ceph_get_cap_for_mds()
457 cap = __get_cap_for_mds(ci, mds); in ceph_get_cap_for_mds()
458 spin_unlock(&ci->i_ceph_lock); in ceph_get_cap_for_mds()
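Taken together, __get_cap_for_mds() and its locked wrapper show the core data structure: each inode keeps at most one ceph_cap per MDS in a tree keyed by the MDS rank, walked under ci->i_ceph_lock. A minimal userspace sketch of the lookup, using a plain binary search tree in place of the kernel's rb-tree and illustrative names:

```c
#include <stddef.h>

/* One capability record per MDS, keyed by that MDS's rank. */
struct cap {
	int mds;                  /* key: rank of the issuing MDS */
	struct cap *left, *right; /* stand-in for the kernel's rb_node */
};

/* Walk the per-inode tree looking for the cap issued by 'mds';
 * the kernel does this while holding ci->i_ceph_lock. */
static struct cap *get_cap_for_mds(struct cap *root, int mds)
{
	struct cap *n = root;

	while (n) {
		if (mds < n->mds)
			n = n->left;
		else if (mds > n->mds)
			n = n->right;
		else
			return n;
	}
	return NULL; /* this MDS has issued no cap for the inode */
}
```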
465 static void __insert_cap_node(struct ceph_inode_info *ci, in __insert_cap_node() argument
468 struct rb_node **p = &ci->i_caps.rb_node; in __insert_cap_node()
484 rb_insert_color(&new->ci_node, &ci->i_caps); in __insert_cap_node()
492 struct ceph_inode_info *ci) in __cap_set_timeouts() argument
495 ci->i_hold_caps_max = round_jiffies(jiffies + in __cap_set_timeouts()
497 dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode, in __cap_set_timeouts()
498 ci->i_hold_caps_max - jiffies); in __cap_set_timeouts()
510 struct ceph_inode_info *ci) in __cap_delay_requeue() argument
512 dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode, in __cap_delay_requeue()
513 ci->i_ceph_flags, ci->i_hold_caps_max); in __cap_delay_requeue()
516 if (!list_empty(&ci->i_cap_delay_list)) { in __cap_delay_requeue()
517 if (ci->i_ceph_flags & CEPH_I_FLUSH) in __cap_delay_requeue()
519 list_del_init(&ci->i_cap_delay_list); in __cap_delay_requeue()
521 __cap_set_timeouts(mdsc, ci); in __cap_delay_requeue()
522 list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue()
534 struct ceph_inode_info *ci) in __cap_delay_requeue_front() argument
536 dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode); in __cap_delay_requeue_front()
538 ci->i_ceph_flags |= CEPH_I_FLUSH; in __cap_delay_requeue_front()
539 if (!list_empty(&ci->i_cap_delay_list)) in __cap_delay_requeue_front()
540 list_del_init(&ci->i_cap_delay_list); in __cap_delay_requeue_front()
541 list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list); in __cap_delay_requeue_front()
551 struct ceph_inode_info *ci) in __cap_delay_cancel() argument
553 dout("__cap_delay_cancel %p\n", &ci->netfs.inode); in __cap_delay_cancel()
554 if (list_empty(&ci->i_cap_delay_list)) in __cap_delay_cancel()
557 list_del_init(&ci->i_cap_delay_list); in __cap_delay_cancel()
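The three __cap_delay_* helpers manage mdsc->cap_delay_list, the queue of inodes whose caps need a delayed check. A sketch of the requeue policy over simplified, assumed types: an inode already queued with the FLUSH flag keeps its (earlier) position, otherwise it moves to the tail with a fresh timeout (timeout bookkeeping omitted here).

```c
#include <stdbool.h>
#include <stddef.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n) { n->prev = n->next = n; }
static bool list_empty(const struct list_node *n) { return n->next == n; }

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

#define FLAG_FLUSH 0x1 /* stands in for CEPH_I_FLUSH */

struct delayed_inode {
	unsigned long flags;
	struct list_node delay_item;
};

/* Requeue at the tail unless the inode is already queued for flush,
 * in which case it keeps its earlier position in the queue. */
static void cap_delay_requeue(struct delayed_inode *in,
			      struct list_node *queue)
{
	if (!list_empty(&in->delay_item)) {
		if (in->flags & FLAG_FLUSH)
			return;
		list_del_init(&in->delay_item);
	}
	list_add_tail(&in->delay_item, queue);
}
```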
562 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, in __check_cap_issue() argument
565 unsigned had = __ceph_caps_issued(ci, NULL); in __check_cap_issue()
567 lockdep_assert_held(&ci->i_ceph_lock); in __check_cap_issue()
573 if (S_ISREG(ci->netfs.inode.i_mode) && in __check_cap_issue()
576 ci->i_rdcache_gen++; in __check_cap_issue()
587 atomic_inc(&ci->i_shared_gen); in __check_cap_issue()
588 if (S_ISDIR(ci->netfs.inode.i_mode)) { in __check_cap_issue()
589 dout(" marking %p NOT complete\n", &ci->netfs.inode); in __check_cap_issue()
590 __ceph_dir_clear_complete(ci); in __check_cap_issue()
595 if (S_ISDIR(ci->netfs.inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) && in __check_cap_issue()
597 ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns)); in __check_cap_issue()
598 memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout)); in __check_cap_issue()
607 void change_auth_cap_ses(struct ceph_inode_info *ci, in change_auth_cap_ses() argument
610 lockdep_assert_held(&ci->i_ceph_lock); in change_auth_cap_ses()
612 if (list_empty(&ci->i_dirty_item) && list_empty(&ci->i_flushing_item)) in change_auth_cap_ses()
616 if (!list_empty(&ci->i_dirty_item)) in change_auth_cap_ses()
617 list_move(&ci->i_dirty_item, &session->s_cap_dirty); in change_auth_cap_ses()
618 if (!list_empty(&ci->i_flushing_item)) in change_auth_cap_ses()
619 list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing); in change_auth_cap_ses()
639 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_add_cap() local
645 lockdep_assert_held(&ci->i_ceph_lock); in ceph_add_cap()
652 cap = __get_cap_for_mds(ci, mds); in ceph_add_cap()
663 cap->ci = ci; in ceph_add_cap()
664 __insert_cap_node(ci, cap); in ceph_add_cap()
691 WARN_ON(cap != ci->i_auth_cap); in ceph_add_cap()
700 if (!ci->i_snap_realm || in ceph_add_cap()
702 realmino != (u64)-1 && ci->i_snap_realm->ino != realmino)) { in ceph_add_cap()
712 __func__, realmino, ci->i_vino.ino, in ceph_add_cap()
713 ci->i_snap_realm ? ci->i_snap_realm->ino : 0); in ceph_add_cap()
716 __check_cap_issue(ci, cap, issued); in ceph_add_cap()
723 actual_wanted = __ceph_caps_wanted(ci); in ceph_add_cap()
729 __cap_delay_requeue(mdsc, ci); in ceph_add_cap()
733 if (!ci->i_auth_cap || in ceph_add_cap()
734 ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) { in ceph_add_cap()
735 if (ci->i_auth_cap && in ceph_add_cap()
736 ci->i_auth_cap->session != cap->session) in ceph_add_cap()
737 change_auth_cap_ses(ci, cap->session); in ceph_add_cap()
738 ci->i_auth_cap = cap; in ceph_add_cap()
742 WARN_ON(ci->i_auth_cap == cap); in ceph_add_cap()
759 wake_up_all(&ci->i_cap_wq); in ceph_add_cap()
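ceph_add_cap() promotes the incoming cap to auth cap when its migration sequence (mseq) is newer, via a wraparound-safe sequence comparison (the kernel helper is ceph_seq_cmp(); the exact expression below is an illustrative sketch of the serial-number idiom):

```c
#include <stdint.h>

/* Serial-number compare: subtraction modulo 2^32 reinterpreted as
 * signed stays correct across sequence-number wraparound. */
static int seq_cmp(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b); /* <0: a older, 0: equal, >0: a newer */
}
```

So the check at line 734, ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0, reads "the current auth cap's mseq is older than the incoming one", and only then does the auth cap (and its dirty/flushing session lists, via change_auth_cap_ses()) move over.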
777 "but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode, in __cap_is_valid()
790 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented) in __ceph_caps_issued() argument
792 int have = ci->i_snap_caps; in __ceph_caps_issued()
798 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_issued()
803 &ci->netfs.inode, cap, ceph_cap_string(cap->issued)); in __ceph_caps_issued()
813 if (ci->i_auth_cap) { in __ceph_caps_issued()
814 cap = ci->i_auth_cap; in __ceph_caps_issued()
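__ceph_caps_issued() computes the effective cap set as the union of the snapshot caps and every valid per-MDS cap; the optional 'implemented' mask additionally collects bits the client still holds while the MDS is revoking them. A recursive sketch over the simplified tree from the earlier example (staleness checks omitted; the caller zeroes *implemented and ORs in snap caps):

```c
struct cap {
	unsigned issued;        /* bits currently granted by this MDS */
	unsigned implemented;   /* granted bits plus bits under revocation */
	struct cap *left, *right;
};

/* OR together every cap in the per-MDS tree. */
static unsigned caps_issued(const struct cap *c, unsigned *implemented)
{
	unsigned have = 0;

	if (!c)
		return 0;
	have |= caps_issued(c->left, implemented);
	have |= c->issued;
	if (implemented)
		*implemented |= c->implemented;
	have |= caps_issued(c->right, implemented);
	return have;
}
```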
823 int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap) in __ceph_caps_issued_other() argument
825 int have = ci->i_snap_caps; in __ceph_caps_issued_other()
829 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_issued_other()
850 dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap, in __touch_cap()
855 &cap->ci->netfs.inode, cap, s->s_mds); in __touch_cap()
865 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) in __ceph_caps_issued_mask() argument
869 int have = ci->i_snap_caps; in __ceph_caps_issued_mask()
873 " (mask %s)\n", ceph_ino(&ci->netfs.inode), in __ceph_caps_issued_mask()
879 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_issued_mask()
885 " (mask %s)\n", ceph_ino(&ci->netfs.inode), cap, in __ceph_caps_issued_mask()
897 " (mask %s)\n", ceph_ino(&ci->netfs.inode), in __ceph_caps_issued_mask()
905 for (q = rb_first(&ci->i_caps); q != p; in __ceph_caps_issued_mask()
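__ceph_caps_issued_mask() can succeed three ways: the snap caps alone cover the mask, a single valid cap covers it, or the running union does, in which case every cap that contributed is "touched" onto the session LRU (the second rb-tree walk above). A flat-array sketch of the decision logic, with the validity checks and LRU touch left out:

```c
#include <stdbool.h>
#include <stddef.h>

struct cap { unsigned issued; };

static bool caps_issued_mask(unsigned snap_caps, const struct cap *caps,
			     size_t ncaps, unsigned mask)
{
	unsigned have = snap_caps;

	if ((have & mask) == mask)
		return true;                     /* snap caps suffice */
	for (size_t i = 0; i < ncaps; i++) {
		if ((caps[i].issued & mask) == mask)
			return true;             /* one cap covers it all */
		have |= caps[i].issued;
		if ((have & mask) == mask)
			return true;             /* the union covers it */
	}
	return false;
}
```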
922 int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask, in __ceph_caps_issued_mask_metric() argument
925 struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb); in __ceph_caps_issued_mask_metric()
928 r = __ceph_caps_issued_mask(ci, mask, touch); in __ceph_caps_issued_mask_metric()
939 int __ceph_caps_revoking_other(struct ceph_inode_info *ci, in __ceph_caps_revoking_other() argument
945 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_revoking_other()
954 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask) in ceph_caps_revoking() argument
956 struct inode *inode = &ci->netfs.inode; in ceph_caps_revoking()
959 spin_lock(&ci->i_ceph_lock); in ceph_caps_revoking()
960 ret = __ceph_caps_revoking_other(ci, NULL, mask); in ceph_caps_revoking()
961 spin_unlock(&ci->i_ceph_lock); in ceph_caps_revoking()
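A cap bit counts as "revoking" when the client still implements it but the MDS no longer issues it, i.e. implemented & ~issued. A sketch of the per-cap test that __ceph_caps_revoking_other() applies to every cap except the one being skipped:

```c
#include <stdbool.h>
#include <stddef.h>

struct cap { unsigned issued, implemented; };

/* True if any cap is part-way through revoking a bit in 'mask'. */
static bool caps_revoking(const struct cap *caps, size_t n, unsigned mask)
{
	for (size_t i = 0; i < n; i++)
		if (caps[i].implemented & ~caps[i].issued & mask)
			return true;
	return false;
}
```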
967 int __ceph_caps_used(struct ceph_inode_info *ci) in __ceph_caps_used() argument
970 if (ci->i_pin_ref) in __ceph_caps_used()
972 if (ci->i_rd_ref) in __ceph_caps_used()
974 if (ci->i_rdcache_ref || in __ceph_caps_used()
975 (S_ISREG(ci->netfs.inode.i_mode) && in __ceph_caps_used()
976 ci->netfs.inode.i_data.nrpages)) in __ceph_caps_used()
978 if (ci->i_wr_ref) in __ceph_caps_used()
980 if (ci->i_wb_ref || ci->i_wrbuffer_ref) in __ceph_caps_used()
982 if (ci->i_fx_ref) in __ceph_caps_used()
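__ceph_caps_used() derives the in-use cap bits purely from reference counts, with one special case: cached pages on a regular file keep FILE_CACHE busy even without an explicit rdcache reference. A sketch with illustrative bit values (the real ones are the kernel's CEPH_CAP_* constants):

```c
#define CAP_PIN          0x001 /* illustrative values, not the kernel's */
#define CAP_FILE_RD      0x002
#define CAP_FILE_CACHE   0x004
#define CAP_FILE_WR      0x008
#define CAP_FILE_BUFFER  0x010
#define CAP_FILE_EXCL    0x020

struct inode_refs {
	int pin_ref, rd_ref, rdcache_ref, wr_ref, wb_ref, wrbuffer_ref, fx_ref;
	long nrpages; /* cached pages also pin FILE_CACHE on regular files */
	int is_reg;
};

static int caps_used(const struct inode_refs *r)
{
	int used = 0;

	if (r->pin_ref)
		used |= CAP_PIN;
	if (r->rd_ref)
		used |= CAP_FILE_RD;
	if (r->rdcache_ref || (r->is_reg && r->nrpages))
		used |= CAP_FILE_CACHE;
	if (r->wr_ref)
		used |= CAP_FILE_WR;
	if (r->wb_ref || r->wrbuffer_ref)
		used |= CAP_FILE_BUFFER;
	if (r->fx_ref)
		used |= CAP_FILE_EXCL;
	return used;
}
```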
992 int __ceph_caps_file_wanted(struct ceph_inode_info *ci) in __ceph_caps_file_wanted() argument
999 ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options; in __ceph_caps_file_wanted()
1003 if (S_ISDIR(ci->netfs.inode.i_mode)) { in __ceph_caps_file_wanted()
1007 if (ci->i_nr_by_mode[RD_SHIFT] > 0 || in __ceph_caps_file_wanted()
1008 time_after(ci->i_last_rd, used_cutoff)) in __ceph_caps_file_wanted()
1011 if (ci->i_nr_by_mode[WR_SHIFT] > 0 || in __ceph_caps_file_wanted()
1012 time_after(ci->i_last_wr, used_cutoff)) { in __ceph_caps_file_wanted()
1018 if (want || ci->i_nr_by_mode[PIN_SHIFT] > 0) in __ceph_caps_file_wanted()
1025 if (ci->i_nr_by_mode[RD_SHIFT] > 0) { in __ceph_caps_file_wanted()
1026 if (ci->i_nr_by_mode[RD_SHIFT] >= FMODE_WAIT_BIAS || in __ceph_caps_file_wanted()
1027 time_after(ci->i_last_rd, used_cutoff)) in __ceph_caps_file_wanted()
1029 } else if (time_after(ci->i_last_rd, idle_cutoff)) { in __ceph_caps_file_wanted()
1033 if (ci->i_nr_by_mode[WR_SHIFT] > 0) { in __ceph_caps_file_wanted()
1034 if (ci->i_nr_by_mode[WR_SHIFT] >= FMODE_WAIT_BIAS || in __ceph_caps_file_wanted()
1035 time_after(ci->i_last_wr, used_cutoff)) in __ceph_caps_file_wanted()
1037 } else if (time_after(ci->i_last_wr, idle_cutoff)) { in __ceph_caps_file_wanted()
1043 ci->i_nr_by_mode[LAZY_SHIFT] > 0) in __ceph_caps_file_wanted()
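__ceph_caps_file_wanted() keeps wanting caps for an access mode while files are open in that mode or it was used recently, and lets the want lapse once the inode idles past a cutoff. A much-reduced sketch of the per-mode test (the kernel additionally special-cases directories, lazy I/O, and the FMODE_WAIT_BIAS used by blocking cap waiters):

```c
#include <stdbool.h>

struct mode_state {
	int nr_open;            /* files currently open in this mode */
	unsigned long last_use; /* jiffies-like timestamp of last use */
};

/* Want caps for this mode if it is in active use or was used recently. */
static bool mode_wanted(const struct mode_state *m, unsigned long now,
			unsigned long used_timeout)
{
	return m->nr_open > 0 || now - m->last_use < used_timeout;
}
```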
1053 int __ceph_caps_wanted(struct ceph_inode_info *ci) in __ceph_caps_wanted() argument
1055 int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci); in __ceph_caps_wanted()
1056 if (S_ISDIR(ci->netfs.inode.i_mode)) { in __ceph_caps_wanted()
1071 int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check) in __ceph_caps_mds_wanted() argument
1077 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in __ceph_caps_mds_wanted()
1081 if (cap == ci->i_auth_cap) in __ceph_caps_mds_wanted()
1091 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_is_any_caps() local
1094 spin_lock(&ci->i_ceph_lock); in ceph_is_any_caps()
1095 ret = __ceph_is_any_real_caps(ci); in ceph_is_any_caps()
1096 spin_unlock(&ci->i_ceph_lock); in ceph_is_any_caps()
1110 struct ceph_inode_info *ci = cap->ci; in __ceph_remove_cap() local
1115 if (!ci) { in __ceph_remove_cap()
1120 lockdep_assert_held(&ci->i_ceph_lock); in __ceph_remove_cap()
1122 dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode); in __ceph_remove_cap()
1124 mdsc = ceph_inode_to_fs_client(&ci->netfs.inode)->mdsc; in __ceph_remove_cap()
1127 rb_erase(&cap->ci_node, &ci->i_caps); in __ceph_remove_cap()
1128 if (ci->i_auth_cap == cap) in __ceph_remove_cap()
1129 ci->i_auth_cap = NULL; in __ceph_remove_cap()
1145 cap->ci = NULL; in __ceph_remove_cap()
1162 cap->cap_ino = ci->i_vino.ino; in __ceph_remove_cap()
1169 if (!__ceph_is_any_real_caps(ci)) { in __ceph_remove_cap()
1174 if (ci->i_wr_ref == 0 && ci->i_snap_realm) in __ceph_remove_cap()
1175 ceph_change_snap_realm(&ci->netfs.inode, NULL); in __ceph_remove_cap()
1177 __cap_delay_cancel(mdsc, ci); in __ceph_remove_cap()
1184 struct ceph_inode_info *ci = cap->ci; in ceph_remove_cap() local
1188 if (!ci) { in ceph_remove_cap()
1193 lockdep_assert_held(&ci->i_ceph_lock); in ceph_remove_cap()
1195 fsc = ceph_inode_to_fs_client(&ci->netfs.inode); in ceph_remove_cap()
1196 WARN_ON_ONCE(ci->i_auth_cap == cap && in ceph_remove_cap()
1197 !list_empty(&ci->i_dirty_item) && in ceph_remove_cap()
1199 !ceph_inode_is_shutdown(&ci->netfs.inode)); in ceph_remove_cap()
1344 void __ceph_remove_caps(struct ceph_inode_info *ci) in __ceph_remove_caps() argument
1346 struct inode *inode = &ci->netfs.inode; in __ceph_remove_caps()
1352 spin_lock(&ci->i_ceph_lock); in __ceph_remove_caps()
1353 p = rb_first(&ci->i_caps); in __ceph_remove_caps()
1359 spin_unlock(&ci->i_ceph_lock); in __ceph_remove_caps()
1374 struct ceph_inode_info *ci = cap->ci; in __prep_cap() local
1375 struct inode *inode = &ci->netfs.inode; in __prep_cap()
1378 lockdep_assert_held(&ci->i_ceph_lock); in __prep_cap()
1390 ci->i_ceph_flags &= ~CEPH_I_FLUSH; in __prep_cap()
1405 arg->follows = flushing ? ci->i_head_snapc->seq : 0; in __prep_cap()
1409 ci->i_reported_size = arg->size; in __prep_cap()
1410 arg->max_size = ci->i_wanted_max_size; in __prep_cap()
1411 if (cap == ci->i_auth_cap) { in __prep_cap()
1413 ci->i_requested_max_size = arg->max_size; in __prep_cap()
1415 ci->i_requested_max_size = 0; in __prep_cap()
1419 arg->old_xattr_buf = __ceph_build_xattrs_blob(ci); in __prep_cap()
1420 arg->xattr_version = ci->i_xattrs.version; in __prep_cap()
1421 arg->xattr_buf = ceph_buffer_get(ci->i_xattrs.blob); in __prep_cap()
1430 arg->btime = ci->i_btime; in __prep_cap()
1441 arg->time_warp_seq = ci->i_time_warp_seq; in __prep_cap()
1447 arg->inline_data = ci->i_inline_version != CEPH_INLINE_NONE; in __prep_cap()
1449 !list_empty(&ci->i_cap_snaps)) { in __prep_cap()
1451 list_for_each_entry_reverse(capsnap, &ci->i_cap_snaps, ci_item) { in __prep_cap()
1463 if (ci->fscrypt_auth_len && in __prep_cap()
1464 WARN_ON_ONCE(ci->fscrypt_auth_len > sizeof(struct ceph_fscrypt_auth))) { in __prep_cap()
1468 arg->fscrypt_auth_len = ci->fscrypt_auth_len; in __prep_cap()
1469 memcpy(arg->fscrypt_auth, ci->fscrypt_auth, in __prep_cap()
1470 min_t(size_t, ci->fscrypt_auth_len, in __prep_cap()
1499 static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci) in __send_cap() argument
1502 struct inode *inode = &ci->netfs.inode; in __send_cap()
1510 spin_lock(&ci->i_ceph_lock); in __send_cap()
1511 __cap_delay_requeue(arg->session->s_mdsc, ci); in __send_cap()
1512 spin_unlock(&ci->i_ceph_lock); in __send_cap()
1521 wake_up_all(&ci->i_cap_wq); in __send_cap()
1592 static void __ceph_flush_snaps(struct ceph_inode_info *ci, in __ceph_flush_snaps() argument
1594 __releases(ci->i_ceph_lock) in __ceph_flush_snaps()
1595 __acquires(ci->i_ceph_lock) in __ceph_flush_snaps()
1597 struct inode *inode = &ci->netfs.inode; in __ceph_flush_snaps()
1605 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { in __ceph_flush_snaps()
1628 if (list_empty(&ci->i_flushing_item)) { in __ceph_flush_snaps()
1629 list_add_tail(&ci->i_flushing_item, in __ceph_flush_snaps()
1635 &ci->i_cap_flush_list); in __ceph_flush_snaps()
1642 ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS; in __ceph_flush_snaps()
1645 struct ceph_cap *cap = ci->i_auth_cap; in __ceph_flush_snaps()
1656 list_for_each_entry(iter, &ci->i_cap_flush_list, i_list) { in __ceph_flush_snaps()
1670 spin_unlock(&ci->i_ceph_lock); in __ceph_flush_snaps()
1684 spin_lock(&ci->i_ceph_lock); in __ceph_flush_snaps()
1688 void ceph_flush_snaps(struct ceph_inode_info *ci, in ceph_flush_snaps() argument
1691 struct inode *inode = &ci->netfs.inode; in ceph_flush_snaps()
1701 spin_lock(&ci->i_ceph_lock); in ceph_flush_snaps()
1702 if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) { in ceph_flush_snaps()
1706 if (!ci->i_auth_cap) { in ceph_flush_snaps()
1711 mds = ci->i_auth_cap->session->s_mds; in ceph_flush_snaps()
1718 spin_unlock(&ci->i_ceph_lock); in ceph_flush_snaps()
1726 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) in ceph_flush_snaps()
1727 __kick_flushing_caps(mdsc, session, ci, 0); in ceph_flush_snaps()
1729 __ceph_flush_snaps(ci, session); in ceph_flush_snaps()
1731 spin_unlock(&ci->i_ceph_lock); in ceph_flush_snaps()
1739 if (!list_empty(&ci->i_snap_flush_item)) in ceph_flush_snaps()
1741 list_del_init(&ci->i_snap_flush_item); in ceph_flush_snaps()
1753 int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, in __ceph_mark_dirty_caps() argument
1757 ceph_sb_to_fs_client(ci->netfs.inode.i_sb)->mdsc; in __ceph_mark_dirty_caps()
1758 struct inode *inode = &ci->netfs.inode; in __ceph_mark_dirty_caps()
1759 int was = ci->i_dirty_caps; in __ceph_mark_dirty_caps()
1762 lockdep_assert_held(&ci->i_ceph_lock); in __ceph_mark_dirty_caps()
1764 if (!ci->i_auth_cap) { in __ceph_mark_dirty_caps()
1771 dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode, in __ceph_mark_dirty_caps()
1774 ci->i_dirty_caps |= mask; in __ceph_mark_dirty_caps()
1776 struct ceph_mds_session *session = ci->i_auth_cap->session; in __ceph_mark_dirty_caps()
1778 WARN_ON_ONCE(ci->i_prealloc_cap_flush); in __ceph_mark_dirty_caps()
1779 swap(ci->i_prealloc_cap_flush, *pcf); in __ceph_mark_dirty_caps()
1781 if (!ci->i_head_snapc) { in __ceph_mark_dirty_caps()
1783 ci->i_head_snapc = ceph_get_snap_context( in __ceph_mark_dirty_caps()
1784 ci->i_snap_realm->cached_context); in __ceph_mark_dirty_caps()
1787 &ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap); in __ceph_mark_dirty_caps()
1788 BUG_ON(!list_empty(&ci->i_dirty_item)); in __ceph_mark_dirty_caps()
1790 list_add(&ci->i_dirty_item, &session->s_cap_dirty); in __ceph_mark_dirty_caps()
1792 if (ci->i_flushing_caps == 0) { in __ceph_mark_dirty_caps()
1797 WARN_ON_ONCE(!ci->i_prealloc_cap_flush); in __ceph_mark_dirty_caps()
1799 BUG_ON(list_empty(&ci->i_dirty_item)); in __ceph_mark_dirty_caps()
1800 if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && in __ceph_mark_dirty_caps()
1803 __cap_delay_requeue(mdsc, ci); in __ceph_mark_dirty_caps()
1855 static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci, in __detach_cap_flush_from_ci() argument
1861 if (wake && cf->i_list.prev != &ci->i_cap_flush_list) { in __detach_cap_flush_from_ci()
1881 struct ceph_inode_info *ci = ceph_inode(inode); in __mark_caps_flushing() local
1885 lockdep_assert_held(&ci->i_ceph_lock); in __mark_caps_flushing()
1886 BUG_ON(ci->i_dirty_caps == 0); in __mark_caps_flushing()
1887 BUG_ON(list_empty(&ci->i_dirty_item)); in __mark_caps_flushing()
1888 BUG_ON(!ci->i_prealloc_cap_flush); in __mark_caps_flushing()
1890 flushing = ci->i_dirty_caps; in __mark_caps_flushing()
1893 ceph_cap_string(ci->i_flushing_caps), in __mark_caps_flushing()
1894 ceph_cap_string(ci->i_flushing_caps | flushing)); in __mark_caps_flushing()
1895 ci->i_flushing_caps |= flushing; in __mark_caps_flushing()
1896 ci->i_dirty_caps = 0; in __mark_caps_flushing()
1899 swap(cf, ci->i_prealloc_cap_flush); in __mark_caps_flushing()
1904 list_del_init(&ci->i_dirty_item); in __mark_caps_flushing()
1910 if (list_empty(&ci->i_flushing_item)) { in __mark_caps_flushing()
1911 list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing); in __mark_caps_flushing()
1916 list_add_tail(&cf->i_list, &ci->i_cap_flush_list); in __mark_caps_flushing()
1925 __releases(ci->i_ceph_lock) in try_nonblocking_invalidate()
1926 __acquires(ci->i_ceph_lock) in try_nonblocking_invalidate()
1928 struct ceph_inode_info *ci = ceph_inode(inode); in try_nonblocking_invalidate() local
1929 u32 invalidating_gen = ci->i_rdcache_gen; in try_nonblocking_invalidate()
1931 spin_unlock(&ci->i_ceph_lock); in try_nonblocking_invalidate()
1934 spin_lock(&ci->i_ceph_lock); in try_nonblocking_invalidate()
1937 invalidating_gen == ci->i_rdcache_gen) { in try_nonblocking_invalidate()
1941 ci->i_rdcache_revoking = ci->i_rdcache_gen - 1; in try_nonblocking_invalidate()
1948 bool __ceph_should_report_size(struct ceph_inode_info *ci) in __ceph_should_report_size() argument
1950 loff_t size = i_size_read(&ci->netfs.inode); in __ceph_should_report_size()
1952 if (ci->i_flushing_caps & CEPH_CAP_FILE_WR) in __ceph_should_report_size()
1954 if (size >= ci->i_max_size) in __ceph_should_report_size()
1957 if (ci->i_max_size > ci->i_reported_size && in __ceph_should_report_size()
1958 (size << 1) >= ci->i_max_size + ci->i_reported_size) in __ceph_should_report_size()
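__ceph_should_report_size() reports file growth once the size crosses the midpoint between the last reported size and the current max_size grant, written as (size << 1) >= max_size + reported_size to avoid a division. A standalone sketch of the same heuristic:

```c
#include <stdbool.h>
#include <sys/types.h>

static bool should_report_size(off_t size, off_t max_size,
			       off_t reported_size, bool flushing_wr)
{
	if (flushing_wr)      /* a pending WR flush already carries size */
		return false;
	if (size >= max_size) /* need a bigger max_size grant right now */
		return true;
	/* more than half of the previous max_size increment is used up */
	if (max_size > reported_size &&
	    (size << 1) >= max_size + reported_size)
		return true;
	return false;
}
```

For example, with reported_size 0 and a 4 MiB max_size grant, growth is reported once the file passes 2 MiB, giving the MDS time to extend max_size before the writer hits the limit.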
1972 void ceph_check_caps(struct ceph_inode_info *ci, int flags) in ceph_check_caps() argument
1974 struct inode *inode = &ci->netfs.inode; in ceph_check_caps()
1988 spin_lock(&ci->i_ceph_lock); in ceph_check_caps()
1989 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) { in ceph_check_caps()
1990 ci->i_ceph_flags |= CEPH_I_ASYNC_CHECK_CAPS; in ceph_check_caps()
1993 spin_unlock(&ci->i_ceph_lock); in ceph_check_caps()
1997 if (ci->i_ceph_flags & CEPH_I_FLUSH) in ceph_check_caps()
2001 file_wanted = __ceph_caps_file_wanted(ci); in ceph_check_caps()
2004 used = __ceph_caps_used(ci); in ceph_check_caps()
2012 issued = __ceph_caps_issued(ci, &implemented); in ceph_check_caps()
2024 __ceph_dir_is_complete(ci)) { in ceph_check_caps()
2045 if (ci->i_max_size == 0) in ceph_check_caps()
2053 ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps), in ceph_check_caps()
2054 ceph_cap_string(ci->i_flushing_caps), in ceph_check_caps()
2068 !(ci->i_wb_ref || ci->i_wrbuffer_ref) && /* no dirty pages... */ in ceph_check_caps()
2078 ci->i_rdcache_revoking = ci->i_rdcache_gen; in ceph_check_caps()
2084 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { in ceph_check_caps()
2092 ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap)) in ceph_check_caps()
2100 if (ci->i_auth_cap && cap != ci->i_auth_cap) in ceph_check_caps()
2101 cap_used &= ~ci->i_auth_cap->issued; in ceph_check_caps()
2110 if (cap == ci->i_auth_cap && in ceph_check_caps()
2113 if (ci->i_wanted_max_size > ci->i_max_size && in ceph_check_caps()
2114 ci->i_wanted_max_size > ci->i_requested_max_size) { in ceph_check_caps()
2120 if (__ceph_should_report_size(ci)) { in ceph_check_caps()
2126 if (cap == ci->i_auth_cap) { in ceph_check_caps()
2127 if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) { in ceph_check_caps()
2131 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) { in ceph_check_caps()
2156 if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref && in ceph_check_caps()
2179 if (cap == ci->i_auth_cap && in ceph_check_caps()
2180 (ci->i_ceph_flags & in ceph_check_caps()
2182 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) in ceph_check_caps()
2183 __kick_flushing_caps(mdsc, session, ci, 0); in ceph_check_caps()
2184 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) in ceph_check_caps()
2185 __ceph_flush_snaps(ci, session); in ceph_check_caps()
2190 if (cap == ci->i_auth_cap && ci->i_dirty_caps) { in ceph_check_caps()
2191 flushing = ci->i_dirty_caps; in ceph_check_caps()
2210 spin_unlock(&ci->i_ceph_lock); in ceph_check_caps()
2211 __send_cap(&arg, ci); in ceph_check_caps()
2212 spin_lock(&ci->i_ceph_lock); in ceph_check_caps()
2218 if (__ceph_is_any_real_caps(ci) && in ceph_check_caps()
2219 list_empty(&ci->i_cap_delay_list) && in ceph_check_caps()
2222 __cap_delay_requeue(mdsc, ci); in ceph_check_caps()
2225 spin_unlock(&ci->i_ceph_lock); in ceph_check_caps()
2240 struct ceph_inode_info *ci = ceph_inode(inode); in try_flush_caps() local
2244 spin_lock(&ci->i_ceph_lock); in try_flush_caps()
2246 if (ci->i_dirty_caps && ci->i_auth_cap) { in try_flush_caps()
2247 struct ceph_cap *cap = ci->i_auth_cap; in try_flush_caps()
2252 spin_unlock(&ci->i_ceph_lock); in try_flush_caps()
2256 if (ci->i_ceph_flags & in try_flush_caps()
2258 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) in try_flush_caps()
2259 __kick_flushing_caps(mdsc, session, ci, 0); in try_flush_caps()
2260 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) in try_flush_caps()
2261 __ceph_flush_snaps(ci, session); in try_flush_caps()
2265 flushing = ci->i_dirty_caps; in try_flush_caps()
2270 __ceph_caps_used(ci), __ceph_caps_wanted(ci), in try_flush_caps()
2273 spin_unlock(&ci->i_ceph_lock); in try_flush_caps()
2275 __send_cap(&arg, ci); in try_flush_caps()
2277 if (!list_empty(&ci->i_cap_flush_list)) { in try_flush_caps()
2279 list_last_entry(&ci->i_cap_flush_list, in try_flush_caps()
2284 flushing = ci->i_flushing_caps; in try_flush_caps()
2285 spin_unlock(&ci->i_ceph_lock); in try_flush_caps()
2297 struct ceph_inode_info *ci = ceph_inode(inode); in caps_are_flushed() local
2300 spin_lock(&ci->i_ceph_lock); in caps_are_flushed()
2301 if (!list_empty(&ci->i_cap_flush_list)) { in caps_are_flushed()
2303 list_first_entry(&ci->i_cap_flush_list, in caps_are_flushed()
2308 spin_unlock(&ci->i_ceph_lock); in caps_are_flushed()
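caps_are_flushed() exploits the tid ordering of i_cap_flush_list: a flush with tid T is complete once no pending entry has a tid <= T, so only the first (oldest) entry needs checking. A sketch over a sorted array standing in for the list:

```c
#include <stdbool.h>
#include <stddef.h>

/* pending_tids holds the tids of still-pending cap flushes in ascending
 * order; an empty list means everything has been flushed. */
static bool caps_are_flushed(const unsigned long *pending_tids, size_t n,
			     unsigned long flush_tid)
{
	return n == 0 || pending_tids[0] > flush_tid;
}
```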
2318 struct ceph_inode_info *ci = ceph_inode(inode); in flush_mdlog_and_wait_inode_unsafe_requests() local
2322 spin_lock(&ci->i_unsafe_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2323 if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) { in flush_mdlog_and_wait_inode_unsafe_requests()
2324 req1 = list_last_entry(&ci->i_unsafe_dirops, in flush_mdlog_and_wait_inode_unsafe_requests()
2329 if (!list_empty(&ci->i_unsafe_iops)) { in flush_mdlog_and_wait_inode_unsafe_requests()
2330 req2 = list_last_entry(&ci->i_unsafe_iops, in flush_mdlog_and_wait_inode_unsafe_requests()
2335 spin_unlock(&ci->i_unsafe_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2359 spin_lock(&ci->i_unsafe_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2361 list_for_each_entry(req, &ci->i_unsafe_dirops, in flush_mdlog_and_wait_inode_unsafe_requests()
2373 list_for_each_entry(req, &ci->i_unsafe_iops, in flush_mdlog_and_wait_inode_unsafe_requests()
2384 spin_unlock(&ci->i_unsafe_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2387 spin_lock(&ci->i_ceph_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2388 if (ci->i_auth_cap) { in flush_mdlog_and_wait_inode_unsafe_requests()
2389 s = ci->i_auth_cap->session; in flush_mdlog_and_wait_inode_unsafe_requests()
2393 spin_unlock(&ci->i_ceph_lock); in flush_mdlog_and_wait_inode_unsafe_requests()
2433 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_fsync() local
2459 err = wait_event_interruptible(ci->i_cap_wq, in ceph_fsync()
2482 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_write_inode() local
2496 err = wait_event_interruptible(ci->i_cap_wq, in ceph_write_inode()
2502 spin_lock(&ci->i_ceph_lock); in ceph_write_inode()
2503 if (__ceph_caps_dirty(ci)) in ceph_write_inode()
2504 __cap_delay_requeue_front(mdsc, ci); in ceph_write_inode()
2505 spin_unlock(&ci->i_ceph_lock); in ceph_write_inode()
2512 struct ceph_inode_info *ci, in __kick_flushing_caps() argument
2514 __releases(ci->i_ceph_lock) in __kick_flushing_caps()
2515 __acquires(ci->i_ceph_lock) in __kick_flushing_caps()
2517 struct inode *inode = &ci->netfs.inode; in __kick_flushing_caps()
2525 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) in __kick_flushing_caps()
2528 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH; in __kick_flushing_caps()
2530 list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) { in __kick_flushing_caps()
2537 list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) { in __kick_flushing_caps()
2541 cap = ci->i_auth_cap; in __kick_flushing_caps()
2558 __ceph_caps_used(ci), in __kick_flushing_caps()
2559 __ceph_caps_wanted(ci), in __kick_flushing_caps()
2562 spin_unlock(&ci->i_ceph_lock); in __kick_flushing_caps()
2563 __send_cap(&arg, ci); in __kick_flushing_caps()
2573 spin_unlock(&ci->i_ceph_lock); in __kick_flushing_caps()
2588 spin_lock(&ci->i_ceph_lock); in __kick_flushing_caps()
2595 struct ceph_inode_info *ci; in ceph_early_kick_flushing_caps() local
2605 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { in ceph_early_kick_flushing_caps()
2606 spin_lock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2607 cap = ci->i_auth_cap; in ceph_early_kick_flushing_caps()
2610 &ci->netfs.inode, cap, session->s_mds); in ceph_early_kick_flushing_caps()
2611 spin_unlock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2622 if ((cap->issued & ci->i_flushing_caps) != in ceph_early_kick_flushing_caps()
2623 ci->i_flushing_caps) { in ceph_early_kick_flushing_caps()
2630 __kick_flushing_caps(mdsc, session, ci, in ceph_early_kick_flushing_caps()
2633 ci->i_ceph_flags |= CEPH_I_KICK_FLUSH; in ceph_early_kick_flushing_caps()
2636 spin_unlock(&ci->i_ceph_lock); in ceph_early_kick_flushing_caps()
2643 struct ceph_inode_info *ci; in ceph_kick_flushing_caps() local
2655 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { in ceph_kick_flushing_caps()
2656 spin_lock(&ci->i_ceph_lock); in ceph_kick_flushing_caps()
2657 cap = ci->i_auth_cap; in ceph_kick_flushing_caps()
2660 &ci->netfs.inode, cap, session->s_mds); in ceph_kick_flushing_caps()
2661 spin_unlock(&ci->i_ceph_lock); in ceph_kick_flushing_caps()
2664 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) { in ceph_kick_flushing_caps()
2665 __kick_flushing_caps(mdsc, session, ci, in ceph_kick_flushing_caps()
2668 spin_unlock(&ci->i_ceph_lock); in ceph_kick_flushing_caps()
2673 struct ceph_inode_info *ci) in ceph_kick_flushing_inode_caps() argument
2676 struct ceph_cap *cap = ci->i_auth_cap; in ceph_kick_flushing_inode_caps()
2678 lockdep_assert_held(&ci->i_ceph_lock); in ceph_kick_flushing_inode_caps()
2680 dout("%s %p flushing %s\n", __func__, &ci->netfs.inode, in ceph_kick_flushing_inode_caps()
2681 ceph_cap_string(ci->i_flushing_caps)); in ceph_kick_flushing_inode_caps()
2683 if (!list_empty(&ci->i_cap_flush_list)) { in ceph_kick_flushing_inode_caps()
2686 list_move_tail(&ci->i_flushing_item, in ceph_kick_flushing_inode_caps()
2691 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid); in ceph_kick_flushing_inode_caps()
2700 void ceph_take_cap_refs(struct ceph_inode_info *ci, int got, in ceph_take_cap_refs() argument
2703 lockdep_assert_held(&ci->i_ceph_lock); in ceph_take_cap_refs()
2706 ci->i_pin_ref++; in ceph_take_cap_refs()
2708 ci->i_rd_ref++; in ceph_take_cap_refs()
2710 ci->i_rdcache_ref++; in ceph_take_cap_refs()
2712 ci->i_fx_ref++; in ceph_take_cap_refs()
2714 if (ci->i_wr_ref == 0 && !ci->i_head_snapc) { in ceph_take_cap_refs()
2716 ci->i_head_snapc = ceph_get_snap_context( in ceph_take_cap_refs()
2717 ci->i_snap_realm->cached_context); in ceph_take_cap_refs()
2719 ci->i_wr_ref++; in ceph_take_cap_refs()
2722 if (ci->i_wb_ref == 0) in ceph_take_cap_refs()
2723 ihold(&ci->netfs.inode); in ceph_take_cap_refs()
2724 ci->i_wb_ref++; in ceph_take_cap_refs()
2726 &ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref); in ceph_take_cap_refs()
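ceph_take_cap_refs() turns each granted cap bit into a matching reference-count bump, all under i_ceph_lock; the first buffered-write reference is also where the kernel pins the inode with ihold(), and the first write reference captures i_head_snapc. A sketch using the same illustrative bit values as the earlier caps_used() example:

```c
#define CAP_PIN          0x001 /* illustrative values, not the kernel's */
#define CAP_FILE_RD      0x002
#define CAP_FILE_CACHE   0x004
#define CAP_FILE_WR      0x008
#define CAP_FILE_BUFFER  0x010
#define CAP_FILE_EXCL    0x020

struct inode_refs {
	int pin_ref, rd_ref, rdcache_ref, fx_ref, wr_ref, wb_ref;
};

static void take_cap_refs(struct inode_refs *r, int got)
{
	if (got & CAP_PIN)
		r->pin_ref++;
	if (got & CAP_FILE_RD)
		r->rd_ref++;
	if (got & CAP_FILE_CACHE)
		r->rdcache_ref++;
	if (got & CAP_FILE_EXCL)
		r->fx_ref++;
	if (got & CAP_FILE_WR)
		r->wr_ref++;
	if (got & CAP_FILE_BUFFER) {
		/* the first buffered-write ref is where the kernel calls
		 * ihold() to keep the inode alive during writeback */
		r->wb_ref++;
	}
}
```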
2752 struct ceph_inode_info *ci = ceph_inode(inode); in try_get_cap_refs() local
2762 spin_lock(&ci->i_ceph_lock); in try_get_cap_refs()
2765 (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) { in try_get_cap_refs()
2772 while (ci->i_truncate_pending) { in try_get_cap_refs()
2773 spin_unlock(&ci->i_ceph_lock); in try_get_cap_refs()
2779 spin_lock(&ci->i_ceph_lock); in try_get_cap_refs()
2782 have = __ceph_caps_issued(ci, &implemented); in try_get_cap_refs()
2785 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) { in try_get_cap_refs()
2787 inode, endoff, ci->i_max_size); in try_get_cap_refs()
2788 if (endoff > ci->i_requested_max_size) in try_get_cap_refs()
2789 ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN; in try_get_cap_refs()
2796 if (__ceph_have_pending_cap_snap(ci)) { in try_get_cap_refs()
2820 !ci->i_head_snapc && in try_get_cap_refs()
2832 spin_unlock(&ci->i_ceph_lock); in try_get_cap_refs()
2843 ceph_take_cap_refs(ci, *got, true); in try_get_cap_refs()
2849 if (ci->i_auth_cap && in try_get_cap_refs()
2851 struct ceph_mds_session *s = ci->i_auth_cap->session; in try_get_cap_refs()
2858 inode, ceph_cap_string(need), ci->i_auth_cap->mds); in try_get_cap_refs()
2868 mds_wanted = __ceph_caps_mds_wanted(ci, false); in try_get_cap_refs()
2882 __ceph_touch_fmode(ci, mdsc, flags); in try_get_cap_refs()
2884 spin_unlock(&ci->i_ceph_lock); in try_get_cap_refs()
2905 struct ceph_inode_info *ci = ceph_inode(inode); in check_max_size() local
2909 spin_lock(&ci->i_ceph_lock); in check_max_size()
2910 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) { in check_max_size()
2913 ci->i_wanted_max_size = endoff; in check_max_size()
2916 if (ci->i_auth_cap && in check_max_size()
2917 (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) && in check_max_size()
2918 ci->i_wanted_max_size > ci->i_max_size && in check_max_size()
2919 ci->i_wanted_max_size > ci->i_requested_max_size) in check_max_size()
2921 spin_unlock(&ci->i_ceph_lock); in check_max_size()
2923 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY); in check_max_size()
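check_max_size() records how far local writers need to extend the file, but only asks the MDS when the auth cap actually grants FILE_WR and the request would exceed both the current grant and anything already requested. A sketch of that decision with assumed, simplified state:

```c
#include <stdbool.h>
#include <sys/types.h>

struct size_state {
	off_t max_size;           /* current MDS-granted write limit */
	off_t wanted_max_size;    /* what local writers need */
	off_t requested_max_size; /* what we last asked the MDS for */
};

/* Returns true when the caller should send a cap message asking the
 * auth MDS for a larger max_size. */
static bool check_max_size(struct size_state *s, off_t endoff,
			   bool auth_has_wr)
{
	if (endoff >= s->max_size && endoff > s->wanted_max_size)
		s->wanted_max_size = endoff;
	return auth_has_wr &&
	       s->wanted_max_size > s->max_size &&
	       s->wanted_max_size > s->requested_max_size;
}
```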
2970 struct ceph_inode_info *ci = ceph_inode(inode); in __ceph_get_caps() local
3007 ceph_get_fmode(ci, flags, FMODE_WAIT_BIAS); in __ceph_get_caps()
3008 add_wait_queue(&ci->i_cap_wq, &wait); in __ceph_get_caps()
3020 remove_wait_queue(&ci->i_cap_wq, &wait); in __ceph_get_caps()
3021 ceph_put_fmode(ci, flags, FMODE_WAIT_BIAS); in __ceph_get_caps()
3034 ceph_put_cap_refs(ci, _got); in __ceph_get_caps()
3057 if (S_ISREG(ci->netfs.inode.i_mode) && in __ceph_get_caps()
3058 ceph_has_inline_data(ci) && in __ceph_get_caps()
3074 ceph_put_cap_refs(ci, _got); in __ceph_get_caps()
3107 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) in ceph_get_cap_refs() argument
3109 spin_lock(&ci->i_ceph_lock); in ceph_get_cap_refs()
3110 ceph_take_cap_refs(ci, caps, false); in ceph_get_cap_refs()
3111 spin_unlock(&ci->i_ceph_lock); in ceph_get_cap_refs()
3119 static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci, in ceph_try_drop_cap_snap() argument
3128 if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps)) in ceph_try_drop_cap_snap()
3129 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS; in ceph_try_drop_cap_snap()
3153 static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had, in __ceph_put_cap_refs() argument
3156 struct inode *inode = &ci->netfs.inode; in __ceph_put_cap_refs()
3160 spin_lock(&ci->i_ceph_lock); in __ceph_put_cap_refs()
3162 --ci->i_pin_ref; in __ceph_put_cap_refs()
3164 if (--ci->i_rd_ref == 0) in __ceph_put_cap_refs()
3167 if (--ci->i_rdcache_ref == 0) in __ceph_put_cap_refs()
3170 if (--ci->i_fx_ref == 0) in __ceph_put_cap_refs()
3173 if (--ci->i_wb_ref == 0) { in __ceph_put_cap_refs()
3180 inode, ci->i_wb_ref+1, ci->i_wb_ref); in __ceph_put_cap_refs()
3183 if (--ci->i_wr_ref == 0) { in __ceph_put_cap_refs()
3188 WARN_ON_ONCE(ci->i_wb_ref); in __ceph_put_cap_refs()
3192 if (ci->i_wrbuffer_ref_head == 0 && in __ceph_put_cap_refs()
3193 ci->i_dirty_caps == 0 && in __ceph_put_cap_refs()
3194 ci->i_flushing_caps == 0) { in __ceph_put_cap_refs()
3195 BUG_ON(!ci->i_head_snapc); in __ceph_put_cap_refs()
3196 ceph_put_snap_context(ci->i_head_snapc); in __ceph_put_cap_refs()
3197 ci->i_head_snapc = NULL; in __ceph_put_cap_refs()
3200 if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm) in __ceph_put_cap_refs()
3204 if (check_flushsnaps && __ceph_have_pending_cap_snap(ci)) { in __ceph_put_cap_refs()
3206 list_last_entry(&ci->i_cap_snaps, in __ceph_put_cap_refs()
3211 if (ceph_try_drop_cap_snap(ci, capsnap)) in __ceph_put_cap_refs()
3214 else if (__ceph_finish_cap_snap(ci, capsnap)) in __ceph_put_cap_refs()
3218 spin_unlock(&ci->i_ceph_lock); in __ceph_put_cap_refs()
3226 ceph_check_caps(ci, 0); in __ceph_put_cap_refs()
3228 ceph_flush_snaps(ci, NULL); in __ceph_put_cap_refs()
3240 wake_up_all(&ci->i_cap_wq); in __ceph_put_cap_refs()
3245 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) in ceph_put_cap_refs() argument
3247 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_SYNC); in ceph_put_cap_refs()
3250 void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had) in ceph_put_cap_refs_async() argument
3252 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_ASYNC); in ceph_put_cap_refs_async()
3255 void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had) in ceph_put_cap_refs_no_check_caps() argument
3257 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_NO_CHECK); in ceph_put_cap_refs_no_check_caps()
3267 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, in ceph_put_wrbuffer_cap_refs() argument
3270 struct inode *inode = &ci->netfs.inode; in ceph_put_wrbuffer_cap_refs()
3277 spin_lock(&ci->i_ceph_lock); in ceph_put_wrbuffer_cap_refs()
3278 ci->i_wrbuffer_ref -= nr; in ceph_put_wrbuffer_cap_refs()
3279 if (ci->i_wrbuffer_ref == 0) { in ceph_put_wrbuffer_cap_refs()
3284 if (ci->i_head_snapc == snapc) { in ceph_put_wrbuffer_cap_refs()
3285 ci->i_wrbuffer_ref_head -= nr; in ceph_put_wrbuffer_cap_refs()
3286 if (ci->i_wrbuffer_ref_head == 0 && in ceph_put_wrbuffer_cap_refs()
3287 ci->i_wr_ref == 0 && in ceph_put_wrbuffer_cap_refs()
3288 ci->i_dirty_caps == 0 && in ceph_put_wrbuffer_cap_refs()
3289 ci->i_flushing_caps == 0) { in ceph_put_wrbuffer_cap_refs()
3290 BUG_ON(!ci->i_head_snapc); in ceph_put_wrbuffer_cap_refs()
3291 ceph_put_snap_context(ci->i_head_snapc); in ceph_put_wrbuffer_cap_refs()
3292 ci->i_head_snapc = NULL; in ceph_put_wrbuffer_cap_refs()
3296 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr, in ceph_put_wrbuffer_cap_refs()
3297 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, in ceph_put_wrbuffer_cap_refs()
3300 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) { in ceph_put_wrbuffer_cap_refs()
3312 WARN_ON_ONCE(ci->i_auth_cap); in ceph_put_wrbuffer_cap_refs()
3320 if (ceph_try_drop_cap_snap(ci, capsnap)) { in ceph_put_wrbuffer_cap_refs()
3323 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS; in ceph_put_wrbuffer_cap_refs()
3331 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, in ceph_put_wrbuffer_cap_refs()
3332 ci->i_wrbuffer_ref, capsnap->dirty_pages, in ceph_put_wrbuffer_cap_refs()
3338 spin_unlock(&ci->i_ceph_lock); in ceph_put_wrbuffer_cap_refs()
3341 ceph_check_caps(ci, 0); in ceph_put_wrbuffer_cap_refs()
3343 ceph_flush_snaps(ci, NULL); in ceph_put_wrbuffer_cap_refs()
3346 wake_up_all(&ci->i_cap_wq); in ceph_put_wrbuffer_cap_refs()
3415 __releases(ci->i_ceph_lock) in handle_cap_grant()
3418 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_grant() local
3455 !(ci->i_wrbuffer_ref || ci->i_wb_ref)) { in handle_cap_grant()
3459 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { in handle_cap_grant()
3461 ci->i_rdcache_revoking = ci->i_rdcache_gen; in handle_cap_grant()
3478 WARN_ON(cap != ci->i_auth_cap); in handle_cap_grant()
3488 __check_cap_issue(ci, cap, newcaps); in handle_cap_grant()
3503 ci->i_btime = extra_info->btime; in handle_cap_grant()
3508 if (ci->fscrypt_auth_len != extra_info->fscrypt_auth_len || in handle_cap_grant()
3509 memcmp(ci->fscrypt_auth, extra_info->fscrypt_auth, in handle_cap_grant()
3510 ci->fscrypt_auth_len)) in handle_cap_grant()
3512 __func__, ci->fscrypt_auth_len, in handle_cap_grant()
3529 if (version > ci->i_xattrs.version) { in handle_cap_grant()
3532 if (ci->i_xattrs.blob) in handle_cap_grant()
3533 ceph_buffer_put(ci->i_xattrs.blob); in handle_cap_grant()
3534 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf); in handle_cap_grant()
3535 ci->i_xattrs.version = version; in handle_cap_grant()
3553 ci->i_files = extra_info->nfiles; in handle_cap_grant()
3554 ci->i_subdirs = extra_info->nsubdirs; in handle_cap_grant()
3559 s64 old_pool = ci->i_layout.pool_id; in handle_cap_grant()
3562 ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout); in handle_cap_grant()
3563 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns, in handle_cap_grant()
3564 lockdep_is_held(&ci->i_ceph_lock)); in handle_cap_grant()
3565 rcu_assign_pointer(ci->i_layout.pool_ns, extra_info->pool_ns); in handle_cap_grant()
3567 if (ci->i_layout.pool_id != old_pool || in handle_cap_grant()
3569 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; in handle_cap_grant()
3580 if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) { in handle_cap_grant()
3581 if (max_size != ci->i_max_size) { in handle_cap_grant()
3583 ci->i_max_size, max_size); in handle_cap_grant()
3584 ci->i_max_size = max_size; in handle_cap_grant()
3585 if (max_size >= ci->i_wanted_max_size) { in handle_cap_grant()
3586 ci->i_wanted_max_size = 0; /* reset */ in handle_cap_grant()
3587 ci->i_requested_max_size = 0; in handle_cap_grant()
3594 wanted = __ceph_caps_wanted(ci); in handle_cap_grant()
3595 used = __ceph_caps_used(ci); in handle_cap_grant()
3596 dirty = __ceph_caps_dirty(ci); in handle_cap_grant()
3632 else if (cap == ci->i_auth_cap) in handle_cap_grant()
3648 if (cap == ci->i_auth_cap && in handle_cap_grant()
3649 __ceph_caps_revoking_other(ci, cap, newcaps)) in handle_cap_grant()
3663 if (cap == ci->i_auth_cap) in handle_cap_grant()
3670 extra_info->inline_version >= ci->i_inline_version) { in handle_cap_grant()
3671 ci->i_inline_version = extra_info->inline_version; in handle_cap_grant()
3672 if (ci->i_inline_version != CEPH_INLINE_NONE && in handle_cap_grant()
3678 if (ci->i_auth_cap == cap) { in handle_cap_grant()
3682 if (ci->i_requested_max_size > max_size || in handle_cap_grant()
3685 ci->i_requested_max_size = 0; in handle_cap_grant()
3689 ceph_kick_flushing_inode_caps(session, ci); in handle_cap_grant()
3693 spin_unlock(&ci->i_ceph_lock); in handle_cap_grant()
3714 wake_up_all(&ci->i_cap_wq); in handle_cap_grant()
3718 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL); in handle_cap_grant()
3720 ceph_check_caps(ci, CHECK_CAPS_NOINVAL); in handle_cap_grant()
3731 __releases(ci->i_ceph_lock) in handle_cap_flush_ack()
3733 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_flush_ack() local
3744 list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) { in handle_cap_flush_ack()
3758 wake_ci |= __detach_cap_flush_from_ci(ci, cf); in handle_cap_flush_ack()
3774 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps), in handle_cap_flush_ack()
3775 ceph_cap_string(ci->i_flushing_caps & ~cleaned)); in handle_cap_flush_ack()
3780 ci->i_flushing_caps &= ~cleaned; in handle_cap_flush_ack()
3787 if (ci->i_flushing_caps == 0) { in handle_cap_flush_ack()
3788 if (list_empty(&ci->i_cap_flush_list)) { in handle_cap_flush_ack()
3789 list_del_init(&ci->i_flushing_item); in handle_cap_flush_ack()
3801 if (ci->i_dirty_caps == 0) { in handle_cap_flush_ack()
3803 BUG_ON(!list_empty(&ci->i_dirty_item)); in handle_cap_flush_ack()
3805 if (ci->i_wr_ref == 0 && in handle_cap_flush_ack()
3806 ci->i_wrbuffer_ref_head == 0) { in handle_cap_flush_ack()
3807 BUG_ON(!ci->i_head_snapc); in handle_cap_flush_ack()
3808 ceph_put_snap_context(ci->i_head_snapc); in handle_cap_flush_ack()
3809 ci->i_head_snapc = NULL; in handle_cap_flush_ack()
3812 BUG_ON(list_empty(&ci->i_dirty_item)); in handle_cap_flush_ack()
3818 spin_unlock(&ci->i_ceph_lock); in handle_cap_flush_ack()
3829 wake_up_all(&ci->i_cap_wq); in handle_cap_flush_ack()
3839 struct ceph_inode_info *ci = ceph_inode(inode); in __ceph_remove_capsnap() local
3843 lockdep_assert_held(&ci->i_ceph_lock); in __ceph_remove_capsnap()
3845 dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci); in __ceph_remove_capsnap()
3848 ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush); in __ceph_remove_capsnap()
3853 if (list_empty(&ci->i_cap_flush_list)) in __ceph_remove_capsnap()
3854 list_del_init(&ci->i_flushing_item); in __ceph_remove_capsnap()
3865 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_remove_capsnap() local
3867 lockdep_assert_held(&ci->i_ceph_lock); in ceph_remove_capsnap()
3883 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_flushsnap_ack() local
3891 inode, ci, session->s_mds, follows); in handle_cap_flushsnap_ack()
3893 spin_lock(&ci->i_ceph_lock); in handle_cap_flushsnap_ack()
3894 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) { in handle_cap_flushsnap_ack()
3911 spin_unlock(&ci->i_ceph_lock); in handle_cap_flushsnap_ack()
3917 wake_up_all(&ci->i_cap_wq); in handle_cap_flushsnap_ack()
3934 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_trunc() local
3941 int dirty = __ceph_caps_dirty(ci); in handle_cap_trunc()
3945 lockdep_assert_held(&ci->i_ceph_lock); in handle_cap_trunc()
3979 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_export() local
3997 inode, ci, mds, mseq, target); in handle_cap_export()
4000 spin_lock(&ci->i_ceph_lock); in handle_cap_export()
4001 cap = __get_cap_for_mds(ci, mds); in handle_cap_export()
4025 tcap = __get_cap_for_mds(ci, target); in handle_cap_export()
4036 if (cap == ci->i_auth_cap) { in handle_cap_export()
4037 ci->i_auth_cap = tcap; in handle_cap_export()
4038 change_auth_cap_ses(ci, tcap->session); in handle_cap_export()
4045 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0; in handle_cap_export()
4050 if (!list_empty(&ci->i_cap_flush_list) && in handle_cap_export()
4051 ci->i_auth_cap == tcap) { in handle_cap_export()
4053 list_move_tail(&ci->i_flushing_item, in handle_cap_export()
4062 spin_unlock(&ci->i_ceph_lock); in handle_cap_export()
4088 spin_unlock(&ci->i_ceph_lock); in handle_cap_export()
4110 struct ceph_inode_info *ci = ceph_inode(inode); in handle_cap_import() local
4132 inode, ci, mds, mseq, peer); in handle_cap_import()
4134 cap = __get_cap_for_mds(ci, mds); in handle_cap_import()
4137 spin_unlock(&ci->i_ceph_lock); in handle_cap_import()
4139 spin_lock(&ci->i_ceph_lock); in handle_cap_import()
4150 __ceph_caps_issued(ci, &issued); in handle_cap_import()
4151 issued |= __ceph_caps_dirty(ci); in handle_cap_import()
4156 ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL; in handle_cap_import()
4235 struct ceph_inode_info *ci; in ceph_handle_caps() local
4375 ci = ceph_inode(inode); in ceph_handle_caps()
4403 spin_lock(&ci->i_ceph_lock); in ceph_handle_caps()
4414 spin_lock(&ci->i_ceph_lock); in ceph_handle_caps()
4420 spin_unlock(&ci->i_ceph_lock); in ceph_handle_caps()
4436 __ceph_caps_issued(ci, &extra_info.issued); in ceph_handle_caps()
4437 extra_info.issued |= __ceph_caps_dirty(ci); in ceph_handle_caps()
4450 spin_unlock(&ci->i_ceph_lock); in ceph_handle_caps()
4456 spin_unlock(&ci->i_ceph_lock); in ceph_handle_caps()
4514 struct ceph_inode_info *ci; in ceph_check_delayed_caps() local
4523 ci = list_first_entry(&mdsc->cap_delay_list, in ceph_check_delayed_caps()
4526 if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) { in ceph_check_delayed_caps()
4528 delay = ci->i_hold_caps_max; in ceph_check_delayed_caps()
4531 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 && in ceph_check_delayed_caps()
4532 time_before(jiffies, ci->i_hold_caps_max)) in ceph_check_delayed_caps()
4534 list_del_init(&ci->i_cap_delay_list); in ceph_check_delayed_caps()
4536 inode = igrab(&ci->netfs.inode); in ceph_check_delayed_caps()
4540 ceph_check_caps(ci, 0); in ceph_check_delayed_caps()
4556 struct ceph_inode_info *ci; in flush_dirty_session_caps() local
4562 ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info, in flush_dirty_session_caps()
4564 inode = &ci->netfs.inode; in flush_dirty_session_caps()
4569 ceph_check_caps(ci, CHECK_CAPS_FLUSH); in flush_dirty_session_caps()
4582 void __ceph_touch_fmode(struct ceph_inode_info *ci, in __ceph_touch_fmode() argument
4587 ci->i_last_rd = now; in __ceph_touch_fmode()
4589 ci->i_last_wr = now; in __ceph_touch_fmode()
4592 __ceph_is_any_real_caps(ci) && in __ceph_touch_fmode()
4593 list_empty(&ci->i_cap_delay_list)) in __ceph_touch_fmode()
4594 __cap_delay_requeue(mdsc, ci); in __ceph_touch_fmode()
4597 void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count) in ceph_get_fmode() argument
4599 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); in ceph_get_fmode()
4607 spin_lock(&ci->i_ceph_lock); in ceph_get_fmode()
4614 if (i && ci->i_nr_by_mode[i]) in ceph_get_fmode()
4618 ci->i_nr_by_mode[i] += count; in ceph_get_fmode()
4623 spin_unlock(&ci->i_ceph_lock); in ceph_get_fmode()
4631 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count) in ceph_put_fmode() argument
4633 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); in ceph_put_fmode()
4641 spin_lock(&ci->i_ceph_lock); in ceph_put_fmode()
4644 BUG_ON(ci->i_nr_by_mode[i] < count); in ceph_put_fmode()
4645 ci->i_nr_by_mode[i] -= count; in ceph_put_fmode()
4653 if (i && ci->i_nr_by_mode[i]) in ceph_put_fmode()
4659 spin_unlock(&ci->i_ceph_lock); in ceph_put_fmode()
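ceph_get_fmode() and ceph_put_fmode() maintain the per-open-mode counters (i_nr_by_mode) that feed __ceph_caps_file_wanted(): every open bumps the counter for each mode bit it uses, and the matching close drops it. A sketch with assumed shift names mirroring the RD_SHIFT/WR_SHIFT constants seen above:

```c
#include <assert.h>

enum { PIN_SHIFT, RD_SHIFT, WR_SHIFT, LAZY_SHIFT, MODE_COUNT };

struct fmode_counts {
	int nr_by_mode[MODE_COUNT];
};

static void get_fmode(struct fmode_counts *f, int mode_bits, int count)
{
	for (int i = 0; i < MODE_COUNT; i++)
		if (mode_bits & (1 << i))
			f->nr_by_mode[i] += count;
}

static void put_fmode(struct fmode_counts *f, int mode_bits, int count)
{
	for (int i = 0; i < MODE_COUNT; i++)
		if (mode_bits & (1 << i)) {
			assert(f->nr_by_mode[i] >= count);
			f->nr_by_mode[i] -= count;
		}
}
```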
4670 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_drop_caps_for_unlink() local
4673 spin_lock(&ci->i_ceph_lock); in ceph_drop_caps_for_unlink()
4675 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); in ceph_drop_caps_for_unlink()
4677 if (__ceph_caps_dirty(ci)) { in ceph_drop_caps_for_unlink()
4680 __cap_delay_requeue_front(mdsc, ci); in ceph_drop_caps_for_unlink()
4683 spin_unlock(&ci->i_ceph_lock); in ceph_drop_caps_for_unlink()
4698 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_encode_inode_release() local
4704 spin_lock(&ci->i_ceph_lock); in ceph_encode_inode_release()
4705 used = __ceph_caps_used(ci); in ceph_encode_inode_release()
4706 dirty = __ceph_caps_dirty(ci); in ceph_encode_inode_release()
4715 cap = __get_cap_for_mds(ci, mds); in ceph_encode_inode_release()
4731 int wanted = __ceph_caps_wanted(ci); in ceph_encode_inode_release()
4742 if (cap == ci->i_auth_cap && in ceph_encode_inode_release()
4744 ci->i_requested_max_size = 0; in ceph_encode_inode_release()
4767 spin_unlock(&ci->i_ceph_lock); in ceph_encode_inode_release()
4836 struct ceph_inode_info *ci = ceph_inode(inode); in remove_capsnaps() local
4840 lockdep_assert_held(&ci->i_ceph_lock); in remove_capsnaps()
4842 dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode); in remove_capsnaps()
4844 while (!list_empty(&ci->i_cap_snaps)) { in remove_capsnaps()
4845 capsnap = list_first_entry(&ci->i_cap_snaps, in remove_capsnaps()
4852 wake_up_all(&ci->i_cap_wq); in remove_capsnaps()
4861 struct ceph_inode_info *ci = ceph_inode(inode); in ceph_purge_inode_cap() local
4866 lockdep_assert_held(&ci->i_ceph_lock); in ceph_purge_inode_cap()
4869 cap, ci, &ci->netfs.inode); in ceph_purge_inode_cap()
4871 is_auth = (cap == ci->i_auth_cap); in ceph_purge_inode_cap()
4879 if (ci->i_wrbuffer_ref > 0) in ceph_purge_inode_cap()
4886 while (!list_empty(&ci->i_cap_flush_list)) { in ceph_purge_inode_cap()
4887 cf = list_first_entry(&ci->i_cap_flush_list, in ceph_purge_inode_cap()
4895 if (!list_empty(&ci->i_dirty_item)) { in ceph_purge_inode_cap()
4898 ceph_cap_string(ci->i_dirty_caps), in ceph_purge_inode_cap()
4900 ci->i_dirty_caps = 0; in ceph_purge_inode_cap()
4901 list_del_init(&ci->i_dirty_item); in ceph_purge_inode_cap()
4904 if (!list_empty(&ci->i_flushing_item)) { in ceph_purge_inode_cap()
4907 ceph_cap_string(ci->i_flushing_caps), in ceph_purge_inode_cap()
4909 ci->i_flushing_caps = 0; in ceph_purge_inode_cap()
4910 list_del_init(&ci->i_flushing_item); in ceph_purge_inode_cap()
4919 if (ci->i_wrbuffer_ref_head == 0 && in ceph_purge_inode_cap()
4920 ci->i_wr_ref == 0 && in ceph_purge_inode_cap()
4921 ci->i_dirty_caps == 0 && in ceph_purge_inode_cap()
4922 ci->i_flushing_caps == 0) { in ceph_purge_inode_cap()
4923 ceph_put_snap_context(ci->i_head_snapc); in ceph_purge_inode_cap()
4924 ci->i_head_snapc = NULL; in ceph_purge_inode_cap()
4928 if (atomic_read(&ci->i_filelock_ref) > 0) { in ceph_purge_inode_cap()
4930 ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK; in ceph_purge_inode_cap()
4935 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) { in ceph_purge_inode_cap()
4936 cf = ci->i_prealloc_cap_flush; in ceph_purge_inode_cap()
4937 ci->i_prealloc_cap_flush = NULL; in ceph_purge_inode_cap()
4942 if (!list_empty(&ci->i_cap_snaps)) in ceph_purge_inode_cap()