1d5c88131SDarrick J. Wong // SPDX-License-Identifier: GPL-2.0-or-later
2d5c88131SDarrick J. Wong /*
3d5c88131SDarrick J. Wong * Copyright (C) 2022-2023 Oracle. All Rights Reserved.
4d5c88131SDarrick J. Wong * Author: Darrick J. Wong <djwong@kernel.org>
5d5c88131SDarrick J. Wong */
6d5c88131SDarrick J. Wong #include "xfs.h"
7d5c88131SDarrick J. Wong #include "xfs_fs.h"
8d5c88131SDarrick J. Wong #include "xfs_shared.h"
9d5c88131SDarrick J. Wong #include "xfs_format.h"
10d5c88131SDarrick J. Wong #include "xfs_trans_resv.h"
11d5c88131SDarrick J. Wong #include "xfs_mount.h"
12d5c88131SDarrick J. Wong #include "xfs_ag.h"
13d5c88131SDarrick J. Wong #include "xfs_trace.h"
14d5c88131SDarrick J. Wong
/*
 * Use a static key here to reduce the overhead of xfs_defer_drain_rele.  If
 * the compiler supports jump labels, the static branch will be replaced by a
 * nop sled when there are no xfs_defer_drain_wait callers.  Online fsck is
 * currently the only caller, so this is a reasonable tradeoff.
 *
 * Note: Patching the kernel code requires taking the cpu hotplug lock.  Other
 * parts of the kernel allocate memory with that lock held, which means that
 * XFS callers cannot hold any locks that might be used by memory reclaim or
 * writeback when calling the static_branch_{inc,dec} functions.
 */
/* Runtime-patched gate: enabled while at least one drain waiter exists. */
static DEFINE_STATIC_KEY_FALSE(xfs_drain_waiter_gate);

/*
 * Note that a thread is done waiting for drains.  When the key count drops
 * to zero, the static branch in xfs_defer_drain_rele is patched back to a
 * nop and release callers skip the waiter check entirely.  See the locking
 * note above about the cpu hotplug lock.
 */
void
xfs_drain_wait_disable(void)
{
	static_branch_dec(&xfs_drain_waiter_gate);
}
33*466c525dSDarrick J. Wong
/*
 * Announce that a thread intends to wait for drains, enabling the static
 * branch in xfs_defer_drain_rele so that releasers check for (and wake)
 * waiters.  Must be paired with a later call to xfs_drain_wait_disable.
 * See the locking note above about the cpu hotplug lock.
 */
void
xfs_drain_wait_enable(void)
{
	static_branch_inc(&xfs_drain_waiter_gate);
}
39*466c525dSDarrick J. Wong
/* Set up a defer drain: zero pending intents and an empty wait queue. */
void
xfs_defer_drain_init(
	struct xfs_defer_drain	*dr)
{
	atomic_set(&dr->dr_count, 0);
	init_waitqueue_head(&dr->dr_waiters);
}
47d5c88131SDarrick J. Wong
/*
 * Tear down a defer drain.  By this point every intent holder must have
 * released its hold, so the pending count must be zero.
 */
void
xfs_defer_drain_free(struct xfs_defer_drain	*dr)
{
	ASSERT(atomic_read(&dr->dr_count) == 0);
}
53d5c88131SDarrick J. Wong
/*
 * Increase the pending intent count.  A nonzero count tells draining
 * threads (see xfs_defer_drain_wait) that they must keep waiting.
 */
static inline void xfs_defer_drain_grab(struct xfs_defer_drain *dr)
{
	atomic_inc(&dr->dr_count);
}
59d5c88131SDarrick J. Wong
/* Does anybody need to be woken when the pending intent count hits zero? */
static inline bool has_waiters(struct wait_queue_head *wq_head)
{
	/*
	 * This memory barrier is paired with the one in set_current_state on
	 * the waiting side.  It orders the caller's atomic_dec_and_test of
	 * dr_count before the waitqueue_active read, so we cannot miss a
	 * waiter that sampled dr_count just before going to sleep.
	 */
	smp_mb__after_atomic();
	return waitqueue_active(wq_head);
}
69d5c88131SDarrick J. Wong
/* Decrease the pending intent count, and wake any waiters, if appropriate. */
static inline void xfs_defer_drain_rele(struct xfs_defer_drain *dr)
{
	/*
	 * Only the release that drops dr_count to zero can have anyone to
	 * wake, and the static branch skips the (barrier + waitqueue) check
	 * entirely unless someone has called xfs_drain_wait_enable.  The
	 * short-circuit order here is deliberate; do not reorder.
	 */
	if (atomic_dec_and_test(&dr->dr_count) &&
	    static_branch_unlikely(&xfs_drain_waiter_gate) &&
	    has_waiters(&dr->dr_waiters))
		wake_up(&dr->dr_waiters);
}
78d5c88131SDarrick J. Wong
79d5c88131SDarrick J. Wong /* Are there intents pending? */
xfs_defer_drain_busy(struct xfs_defer_drain * dr)80d5c88131SDarrick J. Wong static inline bool xfs_defer_drain_busy(struct xfs_defer_drain *dr)
81d5c88131SDarrick J. Wong {
82d5c88131SDarrick J. Wong return atomic_read(&dr->dr_count) > 0;
83d5c88131SDarrick J. Wong }
84d5c88131SDarrick J. Wong
/*
 * Wait for the pending intent count for a drain to hit zero.
 *
 * Callers must not hold any locks that would prevent intents from being
 * finished.  Returns 0 once the drain is empty, or a negative errno if a
 * fatal signal ends the wait early (per wait_event_killable).
 */
static inline int xfs_defer_drain_wait(struct xfs_defer_drain *dr)
{
	return wait_event_killable(dr->dr_waiters, !xfs_defer_drain_busy(dr));
}
95d5c88131SDarrick J. Wong
96d5c88131SDarrick J. Wong /*
97d5c88131SDarrick J. Wong * Get a passive reference to an AG and declare an intent to update its
98d5c88131SDarrick J. Wong * metadata.
99d5c88131SDarrick J. Wong */
100d5c88131SDarrick J. Wong struct xfs_perag *
xfs_perag_intent_get(struct xfs_mount * mp,xfs_agnumber_t agno)101d5c88131SDarrick J. Wong xfs_perag_intent_get(
102d5c88131SDarrick J. Wong struct xfs_mount *mp,
103d5c88131SDarrick J. Wong xfs_agnumber_t agno)
104d5c88131SDarrick J. Wong {
105d5c88131SDarrick J. Wong struct xfs_perag *pag;
106d5c88131SDarrick J. Wong
107d5c88131SDarrick J. Wong pag = xfs_perag_get(mp, agno);
108d5c88131SDarrick J. Wong if (!pag)
109d5c88131SDarrick J. Wong return NULL;
110d5c88131SDarrick J. Wong
111d5c88131SDarrick J. Wong xfs_perag_intent_hold(pag);
112d5c88131SDarrick J. Wong return pag;
113d5c88131SDarrick J. Wong }
114d5c88131SDarrick J. Wong
/*
 * Release our intent to update this AG's metadata, and then release our
 * passive ref to the AG.  The order matters: the intent release
 * dereferences pag, so the passive reference must be dropped last.
 */
void
xfs_perag_intent_put(
	struct xfs_perag	*pag)
{
	xfs_perag_intent_rele(pag);
	xfs_perag_put(pag);
}
126d5c88131SDarrick J. Wong
/*
 * Declare an intent to update AG metadata.  Other threads that need exclusive
 * access can decide to back off if they see declared intentions.  Pairs with
 * xfs_perag_intent_rele.
 */
void
xfs_perag_intent_hold(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_intent_hold(pag, __return_address);
	xfs_defer_drain_grab(&pag->pag_intents_drain);
}
138d5c88131SDarrick J. Wong
/*
 * Release our intent to update this AG's metadata, waking anyone waiting to
 * drain this AG if ours was the last pending intent.
 */
void
xfs_perag_intent_rele(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_intent_rele(pag, __return_address);
	xfs_defer_drain_rele(&pag->pag_intents_drain);
}
147d5c88131SDarrick J. Wong
/*
 * Wait for the intent update count for this AG to hit zero.
 * Callers must not hold any AG header buffers.  Returns 0 when the AG is
 * drained, or a negative errno if a fatal signal interrupts the wait.
 */
int
xfs_perag_intent_drain(
	struct xfs_perag	*pag)
{
	trace_xfs_perag_wait_intents(pag, __return_address);
	return xfs_defer_drain_wait(&pag->pag_intents_drain);
}
159d5c88131SDarrick J. Wong
/*
 * Has anyone declared an intent to update this AG?  Note that this is only
 * an instantaneous snapshot of the pending intent count.
 */
bool
xfs_perag_intent_busy(
	struct xfs_perag	*pag)
{
	return xfs_defer_drain_busy(&pag->pag_intents_drain);
}
167