Lines Matching refs:ip (fs/gfs2/quota.c)

385 struct gfs2_inode *ip = GFS2_I(inode); in bh_get() local
410 error = gfs2_meta_read(ip->i_gl, iomap.addr >> inode->i_blkbits, in bh_get()
568 int gfs2_qa_get(struct gfs2_inode *ip) in gfs2_qa_get() argument
570 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_qa_get()
571 struct inode *inode = &ip->i_inode; in gfs2_qa_get()
577 if (ip->i_qadata == NULL) { in gfs2_qa_get()
586 if (ip->i_qadata == NULL) in gfs2_qa_get()
587 ip->i_qadata = tmp; in gfs2_qa_get()
591 ip->i_qadata->qa_ref++; in gfs2_qa_get()
596 void gfs2_qa_put(struct gfs2_inode *ip) in gfs2_qa_put() argument
598 struct inode *inode = &ip->i_inode; in gfs2_qa_put()
601 if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) { in gfs2_qa_put()
602 kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata); in gfs2_qa_put()
603 ip->i_qadata = NULL; in gfs2_qa_put()
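
The gfs2_qa_get()/gfs2_qa_put() matches above show a reference-counted allocation of ip->i_qadata from gfs2_qadata_cachep: the first get allocates and bumps qa_ref, the last put frees. A minimal, hypothetical caller sketch (not taken from the listing):

    int err;

    err = gfs2_qa_get(ip);      /* allocates ip->i_qadata on first use, bumps qa_ref */
    if (err)
            return err;
    /* ... code that needs ip->i_qadata (per-inode quota data) ... */
    gfs2_qa_put(ip);            /* drops qa_ref; frees i_qadata when it reaches zero */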
608 int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) in gfs2_quota_hold() argument
610 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_hold()
617 error = gfs2_qa_get(ip); in gfs2_quota_hold()
621 qd = ip->i_qadata->qa_qd; in gfs2_quota_hold()
623 if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) || in gfs2_quota_hold()
624 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) { in gfs2_quota_hold()
626 gfs2_qa_put(ip); in gfs2_quota_hold()
630 error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd); in gfs2_quota_hold()
633 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
636 error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd); in gfs2_quota_hold()
639 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
643 !uid_eq(uid, ip->i_inode.i_uid)) { in gfs2_quota_hold()
647 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
652 !gid_eq(gid, ip->i_inode.i_gid)) { in gfs2_quota_hold()
656 ip->i_qadata->qa_qd_num++; in gfs2_quota_hold()
662 gfs2_quota_unhold(ip); in gfs2_quota_hold()
667 void gfs2_quota_unhold(struct gfs2_inode *ip) in gfs2_quota_unhold() argument
669 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_unhold()
672 if (ip->i_qadata == NULL) in gfs2_quota_unhold()
675 gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)); in gfs2_quota_unhold()
677 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_unhold()
678 qdsb_put(ip->i_qadata->qa_qd[x]); in gfs2_quota_unhold()
679 ip->i_qadata->qa_qd[x] = NULL; in gfs2_quota_unhold()
681 ip->i_qadata->qa_qd_num = 0; in gfs2_quota_unhold()
682 gfs2_qa_put(ip); in gfs2_quota_unhold()
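
gfs2_quota_hold() takes quota-data (qd) references for the inode's owner uid/gid, plus the passed-in uid/gid when they differ, storing them in ip->i_qadata->qa_qd[]; gfs2_quota_unhold() drops each of them and then the qadata reference. A hedged sketch of the pairing (caller context assumed, not from the listing):

    int err;

    err = gfs2_quota_hold(ip, uid, gid);  /* qdsb_get() for i_uid, i_gid, and uid/gid if different */
    if (err)
            return err;
    /* ... operate while the qd references are held ... */
    gfs2_quota_unhold(ip);                /* qdsb_put() each qa_qd[x], reset qa_qd_num, gfs2_qa_put(ip) */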
700 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); in do_qc() local
705 gfs2_trans_add_meta(ip->i_gl, qd->qd_bh); in do_qc()
742 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in gfs2_write_buf_to_page() local
743 struct inode *inode = &ip->i_inode; in gfs2_write_buf_to_page()
781 gfs2_trans_add_data(ip->i_gl, bh); in gfs2_write_buf_to_page()
853 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in gfs2_adjust_quota() local
854 struct inode *inode = &ip->i_inode; in gfs2_adjust_quota()
859 if (gfs2_is_stuffed(ip)) { in gfs2_adjust_quota()
860 err = gfs2_unstuff_dinode(ip); in gfs2_adjust_quota()
866 err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q)); in gfs2_adjust_quota()
906 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in do_sync() local
917 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), in do_sync()
925 inode_lock(&ip->i_inode); in do_sync()
933 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in do_sync()
939 if (gfs2_write_alloc_required(ip, offset, in do_sync()
957 error = gfs2_inplace_reserve(ip, &ap); in do_sync()
962 blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS; in do_sync()
983 gfs2_inplace_release(ip); in do_sync()
989 inode_unlock(&ip->i_inode); in do_sync()
991 gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, in do_sync()
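
The do_sync() matches show how the write reservation is sized before the quota file is updated. The fragment below only strings those lines together with comments; 'ap', 'reserved', 'nalloc', 'blocks', and 'error' come from do_sync()'s surrounding context and their setup is assumed, not shown in the listing:

    unsigned int data_blocks, ind_blocks;

    gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                           &data_blocks, &ind_blocks);  /* worst-case blocks for one quota record */
    error = gfs2_inplace_reserve(ip, &ap);              /* reserve resource-group space */
    if (error)
            goto out_alloc;
    blocks += gfs2_rg_blocks(ip, reserved) +            /* rgrp metadata for the reservation */
              nalloc * ind_blocks + RES_STATFS;         /* indirect blocks per record + statfs block */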
1002 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in update_qd() local
1010 error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q)); in update_qd()
1029 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in do_glock() local
1051 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh); in do_glock()
1074 int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) in gfs2_quota_lock() argument
1076 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_lock()
1085 error = gfs2_quota_hold(ip, uid, gid); in gfs2_quota_lock()
1089 sort(ip->i_qadata->qa_qd, ip->i_qadata->qa_qd_num, in gfs2_quota_lock()
1092 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_lock()
1093 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_lock()
1094 error = do_glock(qd, NO_FORCE, &ip->i_qadata->qa_qd_ghs[x]); in gfs2_quota_lock()
1100 set_bit(GIF_QD_LOCKED, &ip->i_flags); in gfs2_quota_lock()
1103 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]); in gfs2_quota_lock()
1104 gfs2_quota_unhold(ip); in gfs2_quota_lock()
1145 void gfs2_quota_unlock(struct gfs2_inode *ip) in gfs2_quota_unlock() argument
1147 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_unlock()
1153 if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags)) in gfs2_quota_unlock()
1156 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_unlock()
1160 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_unlock()
1163 gfs2_glock_dq_uninit(&ip->i_qadata->qa_qd_ghs[x]); in gfs2_quota_unlock()
1184 gfs2_quota_unhold(ip); in gfs2_quota_unlock()
1219 int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid, in gfs2_quota_check() argument
1222 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_check()
1229 if (!test_bit(GIF_QD_LOCKED, &ip->i_flags)) in gfs2_quota_check()
1232 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_check()
1233 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_check()
1276 void gfs2_quota_change(struct gfs2_inode *ip, s64 change, in gfs2_quota_change() argument
1281 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_change()
1287 if (ip->i_diskflags & GFS2_DIF_SYSTEM) in gfs2_quota_change()
1290 if (gfs2_assert_withdraw(sdp, ip->i_qadata && in gfs2_quota_change()
1291 ip->i_qadata->qa_ref > 0)) in gfs2_quota_change()
1293 for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { in gfs2_quota_change()
1294 qd = ip->i_qadata->qa_qd[x]; in gfs2_quota_change()
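
Taken together, the gfs2_quota_lock()/gfs2_quota_check()/gfs2_quota_change()/gfs2_quota_unlock() matches outline the sequence an allocating path is expected to follow. A hedged composite sketch; the wrapper function is hypothetical and the trailing gfs2_quota_check() argument (an allocation-parms pointer) is assumed, as it is cut off in the listing:

    /* hypothetical helper, not in the listing: charge 'change' blocks to ip's owner */
    static int example_charge_blocks(struct gfs2_inode *ip, s64 change,
                                     struct gfs2_alloc_parms *ap)
    {
            kuid_t uid = ip->i_inode.i_uid;
            kgid_t gid = ip->i_inode.i_gid;
            int error;

            error = gfs2_quota_lock(ip, uid, gid);     /* hold qds, glock each, set GIF_QD_LOCKED */
            if (error)
                    return error;
            error = gfs2_quota_check(ip, uid, gid, ap); /* would the allocation exceed a limit? */
            if (error)
                    goto out;
            /* ... perform the allocation ... */
            gfs2_quota_change(ip, change, uid, gid);   /* record the signed block delta per qd */
    out:
            gfs2_quota_unlock(ip);                     /* dq glocks, sync dirty qds, unhold */
            return error;
    }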
1386 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); in gfs2_quota_init() local
1418 error = gfs2_get_extent(&ip->i_inode, x, &dblock, &extlen); in gfs2_quota_init()
1423 bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); in gfs2_quota_init()
1681 struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode); in gfs2_set_dqblk() local
1704 error = gfs2_qa_get(ip); in gfs2_set_dqblk()
1708 inode_lock(&ip->i_inode); in gfs2_set_dqblk()
1712 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); in gfs2_set_dqblk()
1738 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota)); in gfs2_set_dqblk()
1739 if (gfs2_is_stuffed(ip)) in gfs2_set_dqblk()
1743 gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota), in gfs2_set_dqblk()
1747 error = gfs2_inplace_reserve(ip, &ap); in gfs2_set_dqblk()
1750 blocks += gfs2_rg_blocks(ip, blocks); in gfs2_set_dqblk()
1767 gfs2_inplace_release(ip); in gfs2_set_dqblk()
1773 gfs2_qa_put(ip); in gfs2_set_dqblk()
1774 inode_unlock(&ip->i_inode); in gfs2_set_dqblk()
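
Finally, the gfs2_set_dqblk() matches show the acquire/release nesting used when the quota file itself is rewritten. Condensed into one hedged sketch (the wrapper is hypothetical, the transaction body is elided, and the gfs2_glock_dq_uninit() counterpart is assumed since it does not reference ip and so is absent from the listing); line numbers in the comments refer to the matches above:

    static int example_update_quota_file(struct gfs2_sbd *sdp)
    {
            struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
            struct gfs2_holder i_gh;
            int error;

            error = gfs2_qa_get(ip);                                        /* 1704 */
            if (error)
                    return error;
            inode_lock(&ip->i_inode);                                       /* 1708 */
            error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); /* 1712 */
            if (error)
                    goto out_put;
            /* ... size the reservation, start the transaction, write the gfs2_quota record ... */
            gfs2_inplace_release(ip);                                       /* 1767 */
            gfs2_glock_dq_uninit(&i_gh);                                    /* assumed counterpart to 1712 */
    out_put:
            gfs2_qa_put(ip);                                                /* 1773 */
            inode_unlock(&ip->i_inode);                                     /* 1774 */
            return error;
    }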