Lines Matching refs:mp

47 static int xfs_icwalk(struct xfs_mount *mp,
72 struct xfs_mount *mp,
81 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
83 if (inode_init_always(mp->m_super, VFS_I(ip))) {
93 XFS_STATS_INC(mp, vn_active);
99 ip->i_mount = mp;
107 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
190 struct xfs_mount *mp)
194 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
195 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
209 struct xfs_mount *mp = pag->pag_mount;
211 if (!xfs_is_blockgc_enabled(mp))
229 struct xfs_mount *mp = pag->pag_mount;
244 spin_lock(&mp->m_perag_lock);
245 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
246 spin_unlock(&mp->m_perag_lock);
251 xfs_reclaim_work_queue(mp);
268 struct xfs_mount *mp = pag->pag_mount;
288 spin_lock(&mp->m_perag_lock);
289 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
290 spin_unlock(&mp->m_perag_lock);
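
The hits at lines 194, 244-246 and 288-290 all belong to one pattern: per-AG state is kept in a radix tree indexed by AG number, and tags on that tree let reclaim and blockgc walkers find AGs with work to do without scanning them all. A minimal sketch of that pattern follows; demo_mount and the demo_* helpers are simplified stand-ins, not struct xfs_mount or the real XFS per-AG helpers.

/*
 * Sketch of the per-AG tagging pattern visible at lines 194, 244-246
 * and 288-290 above.  Structure and helper names are assumptions
 * simplified for illustration; only the radix tree and spinlock calls
 * are taken from the listing.
 */
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

struct demo_mount {
        spinlock_t              m_perag_lock;   /* serialises tag updates */
        struct radix_tree_root  m_perag_tree;   /* per-AG objects, by agno */
};

/* Mark an AG as having work for a walker (reclaim, blockgc, ...). */
static void demo_set_perag_tag(struct demo_mount *mp, unsigned long agno,
                               unsigned int tag)
{
        spin_lock(&mp->m_perag_lock);
        radix_tree_tag_set(&mp->m_perag_tree, agno, tag);
        spin_unlock(&mp->m_perag_lock);
}

/* Drop the mark once the AG has nothing left for that walker. */
static void demo_clear_perag_tag(struct demo_mount *mp, unsigned long agno,
                                 unsigned int tag)
{
        spin_lock(&mp->m_perag_lock);
        radix_tree_tag_clear(&mp->m_perag_tree, agno, tag);
        spin_unlock(&mp->m_perag_lock);
}

/* Cheap "anything to do at all?" test, as line 194 uses for reclaim. */
static bool demo_any_tagged(struct demo_mount *mp, unsigned int tag)
{
        return radix_tree_tagged(&mp->m_perag_tree, tag);
}
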
305 struct xfs_mount *mp,
317 error = inode_init_always(mp->m_super, inode);
339 struct xfs_mount *mp = ip->i_mount;
360 error = xfs_reinit_inode(mp, inode);
388 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
440 struct xfs_mount *mp)
446 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
447 gc = per_cpu_ptr(mp->m_inodegc, cpu);
449 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
460 struct xfs_mount *mp)
465 flush_workqueue(mp->m_inodegc_wq);
466 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
469 gc = per_cpu_ptr(mp->m_inodegc, cpu);
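
Lines 440-469 show the per-cpu deferred-work side of inodegc: each CPU has its own gc context holding a delayed_work, a cpumask records which CPUs currently have queued inodes, "queue all" kicks every marked CPU's work with zero delay, and "wait all" flushes the workqueue. A minimal sketch of just those calls follows; the demo_* names are stand-ins, and the real functions also manage per-cpu inode lists that do not appear in this listing.

/*
 * Sketch of the per-cpu delayed-work pattern behind the hits at lines
 * 440-449 and 460-469 above.  demo_gc/demo_mount are illustrative
 * stand-ins, not the XFS structures.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

struct demo_gc {
        struct delayed_work     work;           /* per-cpu background work */
};

struct demo_mount {
        struct demo_gc __percpu *m_gc;          /* per-cpu gc state */
        cpumask_t               m_gc_cpumask;   /* CPUs with queued items */
        struct workqueue_struct *m_gc_wq;       /* services all gc work */
};

/* Kick every CPU that has queued work, with no further delay. */
static void demo_gc_queue_all(struct demo_mount *mp)
{
        int cpu;

        for_each_cpu(cpu, &mp->m_gc_cpumask) {
                struct demo_gc *gc = per_cpu_ptr(mp->m_gc, cpu);

                mod_delayed_work_on(cpu, mp->m_gc_wq, &gc->work, 0);
        }
}

/* Wait for everything that was queued above to finish running. */
static void demo_gc_wait_all(struct demo_mount *mp)
{
        flush_workqueue(mp->m_gc_wq);
}
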
490 struct xfs_mount *mp = ip->i_mount;
571 XFS_STATS_INC(mp, xs_ig_found);
577 XFS_STATS_INC(mp, xs_ig_frecycle);
591 if (xfs_is_inodegc_enabled(mp))
592 xfs_inodegc_queue_all(mp);
598 struct xfs_mount *mp,
608 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
611 ip = xfs_inode_alloc(mp, ino);
629 if (xfs_has_v3inodes(mp) &&
630 (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
635 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
701 XFS_STATS_INC(mp, xs_ig_dup);
736 struct xfs_mount *mp,
751 if (!xfs_verify_ino(mp, ino))
754 XFS_STATS_INC(mp, xs_ig_attempts);
757 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
758 agino = XFS_INO_TO_AGINO(mp, ino);
775 XFS_STATS_INC(mp, xs_ig_missed);
777 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
956 struct xfs_mount *mp)
958 return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
959 xfs_is_shutdown(mp);
964 struct xfs_mount *mp)
970 if (xfs_want_reclaim_sick(mp))
973 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
974 xfs_ail_push_all_sync(mp->m_ail);
975 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
988 struct xfs_mount *mp,
996 if (xfs_want_reclaim_sick(mp))
1000 xfs_reclaim_work_queue(mp);
1001 xfs_ail_push_all(mp->m_ail);
1003 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
1013 struct xfs_mount *mp)
1019 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1111 struct xfs_mount *mp = container_of(to_delayed_work(work),
1114 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
1115 xfs_reclaim_work_queue(mp);
1166 struct xfs_mount *mp = ip->i_mount;
1181 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1184 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1204 struct xfs_mount *mp = ip->i_mount;
1218 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1221 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1356 struct xfs_mount *mp)
1361 if (!xfs_clear_blockgc_enabled(mp))
1364 for_each_perag(mp, agno, pag)
1366 trace_xfs_blockgc_stop(mp, __return_address);
1372 struct xfs_mount *mp)
1377 if (xfs_set_blockgc_enabled(mp))
1380 trace_xfs_blockgc_start(mp, __return_address);
1381 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1457 struct xfs_mount *mp = pag->pag_mount;
1460 trace_xfs_blockgc_worker(mp, __return_address);
1464 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1475 struct xfs_mount *mp,
1480 trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
1482 error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
1486 return xfs_inodegc_flush(mp);
1495 struct xfs_mount *mp)
1500 trace_xfs_blockgc_flush_all(mp, __return_address);
1507 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1511 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1514 return xfs_inodegc_flush(mp);
1529 struct xfs_mount *mp,
1547 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1548 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1553 if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1554 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1559 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1568 return xfs_blockgc_free_space(mp, &icw);
1648 struct xfs_mount *mp = pag->pag_mount;
1701 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1703 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1704 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1753 struct xfs_mount *mp,
1762 for_each_perag_tag(mp, agno, pag, goal) {
1807 struct xfs_mount *mp = ip->i_mount;
1810 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1816 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1823 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1857 struct xfs_mount *mp = gc->mp;
1866 cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask);
1882 trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits));
1903 struct xfs_mount *mp)
1905 if (!xfs_is_inodegc_enabled(mp))
1907 trace_xfs_inodegc_push(mp, __return_address);
1908 xfs_inodegc_queue_all(mp);
1917 struct xfs_mount *mp)
1919 xfs_inodegc_push(mp);
1920 trace_xfs_inodegc_flush(mp, __return_address);
1921 return xfs_inodegc_wait_all(mp);
1931 struct xfs_mount *mp)
1935 if (!xfs_clear_inodegc_enabled(mp))
1948 xfs_inodegc_queue_all(mp);
1950 flush_workqueue(mp->m_inodegc_wq);
1951 rerun = xfs_inodegc_queue_all(mp);
1954 trace_xfs_inodegc_stop(mp, __return_address);
1964 struct xfs_mount *mp)
1966 if (xfs_set_inodegc_enabled(mp))
1969 trace_xfs_inodegc_start(mp, __return_address);
1970 xfs_inodegc_queue_all(mp);
1978 struct xfs_mount *mp = ip->i_mount;
1983 if (__percpu_counter_compare(&mp->m_frextents,
1984 mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
2006 struct xfs_mount *mp = ip->i_mount;
2008 if (items > mp->m_ino_geo.inodes_per_cluster)
2011 if (__percpu_counter_compare(&mp->m_fdblocks,
2012 mp->m_low_space[XFS_LOWSP_5_PCNT],
2075 struct xfs_mount *mp = ip->i_mount;
2088 gc = this_cpu_ptr(mp->m_inodegc);
2100 if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask))
2101 cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask);
2107 if (!xfs_is_inodegc_enabled(mp)) {
2115 trace_xfs_inodegc_queue(mp, __return_address);
2116 mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
2121 trace_xfs_inodegc_throttle(mp, __return_address);
2140 struct xfs_mount *mp = ip->i_mount;
2143 XFS_STATS_INC(mp, vn_reclaim);
2179 struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
2184 if (!xfs_is_inodegc_enabled(mp))
2187 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2188 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2201 struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
2207 if (!xfs_is_inodegc_enabled(mp))
2210 trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
2212 for_each_cpu(cpu, &mp->m_inodegc_cpumask) {
2213 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2218 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
2236 struct xfs_mount *mp)
2238 struct shrinker *shrink = &mp->m_inodegc_shrinker;
2246 return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
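
Lines 2179-2246 show the embedded-shrinker pattern: the shrinker is a member of struct xfs_mount, the count/scan callbacks recover the mount with container_of(), and registration names the shrinker after the filesystem instance. A minimal sketch under the same stand-in assumptions follows; demo_mount and the callback bodies are illustrative, not the XFS implementation, and register_shrinker() with a printf-style name is the pre-6.7 kernel API used in the listing.

/*
 * Sketch of the embedded-shrinker pattern visible at lines 2179-2246
 * above.  The field names and callback bodies are assumptions; only
 * the container_of() recovery and named registration mirror the hits.
 */
#include <linux/kernel.h>
#include <linux/shrinker.h>

struct demo_mount {
        struct shrinker m_shrinker;     /* embedded, as in struct xfs_mount */
        unsigned long   m_queued;       /* whatever the callbacks report */
};

static unsigned long demo_shrink_count(struct shrinker *shrink,
                                       struct shrink_control *sc)
{
        struct demo_mount *mp = container_of(shrink, struct demo_mount,
                                             m_shrinker);

        /* Report how much work a scan could usefully push. */
        return mp->m_queued;
}

static unsigned long demo_shrink_scan(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        /*
         * A real scan callback would kick queued background work here,
         * as the mod_delayed_work_on() hit at line 2218 suggests.
         */
        return SHRINK_STOP;
}

static int demo_register_shrinker(struct demo_mount *mp, const char *id)
{
        mp->m_shrinker.count_objects = demo_shrink_count;
        mp->m_shrinker.scan_objects = demo_shrink_scan;
        mp->m_shrinker.seeks = DEFAULT_SEEKS;

        /* Name the shrinker after this instance, as line 2246 does. */
        return register_shrinker(&mp->m_shrinker, "demo:%s", id);
}
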