xref: /openbmc/linux/fs/btrfs/transaction.h (revision ae213c44)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #ifndef BTRFS_TRANSACTION_H
7 #define BTRFS_TRANSACTION_H
8 
9 #include <linux/refcount.h>
10 #include "btrfs_inode.h"
11 #include "delayed-ref.h"
12 #include "ctree.h"
13 
/*
 * Lifecycle states of a btrfs_transaction. Protected by
 * fs_info->trans_lock when changed (see the comment on the 'state'
 * field of struct btrfs_transaction below).
 */
enum btrfs_trans_state {
	TRANS_STATE_RUNNING,		/* transaction is open for new work */
	TRANS_STATE_BLOCKED,		/* NOTE(review): presumably new joiners must wait — confirm in transaction.c */
	TRANS_STATE_COMMIT_START,	/* commit has begun */
	TRANS_STATE_COMMIT_DOING,	/* commit critical section (see io_bgs comment below) */
	TRANS_STATE_UNBLOCKED,		/* commit far enough along that waiters may proceed */
	TRANS_STATE_COMPLETED,		/* commit finished */
	TRANS_STATE_MAX,		/* sentinel: number of states, not a real state */
};
23 
/* Bit numbers for the 'flags' field of struct btrfs_transaction. */
#define BTRFS_TRANS_HAVE_FREE_BGS	0
#define BTRFS_TRANS_DIRTY_BG_RUN	1
#define BTRFS_TRANS_CACHE_ENOSPC	2
27 
/*
 * State of one on-disk transaction. A single btrfs_transaction may be
 * shared by many btrfs_trans_handle joiners; it is reference counted
 * via 'use_count' (released with btrfs_put_transaction()).
 */
struct btrfs_transaction {
	/* Id of this transaction */
	u64 transid;
	/*
	 * total external writers(USERSPACE/START/ATTACH) in this
	 * transaction, it must be zero before the transaction is
	 * being committed
	 */
	atomic_t num_extwriters;
	/*
	 * total writers in this transaction, it must be zero before the
	 * transaction can end
	 */
	atomic_t num_writers;
	/* Reference count; freed when it drops to zero */
	refcount_t use_count;

	/* BTRFS_TRANS_* bit flags (see defines above) */
	unsigned long flags;

	/* Be protected by fs_info->trans_lock when we want to change it. */
	enum btrfs_trans_state state;
	/* Non-zero once the transaction has been aborted */
	int aborted;
	/* Linkage into fs_info's list of transactions */
	struct list_head list;
	/* Extent io tree tracking pages dirtied by this transaction */
	struct extent_io_tree dirty_pages;
	/* When this transaction was started */
	time64_t start_time;
	/* Waitqueues for writer count changes and commit completion */
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	/* Snapshots to be created at commit time */
	struct list_head pending_snapshots;
	/* Device items to update at commit time */
	struct list_head dev_update_list;
	/* Roots whose commit root must be switched at commit */
	struct list_head switch_commits;
	/* Dirty block groups, protected by dirty_bgs_lock below */
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - This function can only ever be
	 *   run by one of the transaction committers. Refer to
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
	 *
	 * - btrfs_write_dirty_blockgroups - this is called by
	 *   commit_cowonly_roots from transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING)
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort
	 */
	struct list_head io_bgs;
	/* Roots dropped during this transaction, see dropped_roots_lock */
	struct list_head dropped_roots;

	/*
	 * we need to make sure block group deletion doesn't race with
	 * free space cache writeout.  This mutex keeps them from stomping
	 * on each other
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	/* Delayed extent reference bookkeeping for this transaction */
	struct btrfs_delayed_ref_root delayed_refs;
	/* Back pointer to the owning filesystem */
	struct btrfs_fs_info *fs_info;
};
89 
/*
 * Bits for btrfs_trans_handle::type. __TRANS_FREEZABLE is a modifier
 * combined with a join type; the join types themselves start at bit 9.
 */
#define __TRANS_FREEZABLE	(1U << 0)

#define __TRANS_START		(1U << 9)
#define __TRANS_ATTACH		(1U << 10)
#define __TRANS_JOIN		(1U << 11)
#define __TRANS_JOIN_NOLOCK	(1U << 12)
#define __TRANS_DUMMY		(1U << 13)

#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH		(__TRANS_ATTACH)
#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)

/* Types counted in btrfs_transaction::num_extwriters */
#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)

/* Sentinel handle value used by send; never a real trans handle */
#define BTRFS_SEND_TRANS_STUB	((void *)1)
106 
/*
 * Per-task handle on a running btrfs_transaction, obtained via
 * btrfs_start_transaction()/btrfs_join_transaction() and released with
 * btrfs_end_transaction().
 */
struct btrfs_trans_handle {
	/* Copy of the joined transaction's transid */
	u64 transid;
	/* Metadata space reserved for this handle */
	u64 bytes_reserved;
	/* Space reserved for chunk allocation */
	u64 chunk_bytes_reserved;
	/* Delayed ref updates attributed to this handle */
	unsigned long delayed_ref_updates;
	/* The transaction this handle has joined */
	struct btrfs_transaction *transaction;
	/* Current block reservation; orig_rsv saves the previous one */
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	/* Reference count for nested handle use */
	refcount_t use_count;
	/* TRANS_* type mask this handle was opened with */
	unsigned int type;
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool can_flush_pending_bgs;
	bool reloc_reserved;
	/* NOTE(review): presumably set once the handle did any modification — confirm in transaction.c */
	bool dirty;
	struct btrfs_root *root;
	struct btrfs_fs_info *fs_info;
	/* Block groups created within this handle's lifetime */
	struct list_head new_bgs;
};
127 
/*
 * Describes a snapshot creation request queued on a transaction's
 * pending_snapshots list; the snapshot is materialized at commit time.
 */
struct btrfs_pending_snapshot {
	/* Dentry and parent dir where the snapshot appears */
	struct dentry *dentry;
	struct inode *dir;
	/* Root being snapshotted */
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	/* Resulting snapshot root, filled in at commit */
	struct btrfs_root *snap;
	/* Qgroup inheritance info, may be NULL */
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* block reservation for the operation */
	struct btrfs_block_rsv block_rsv;
	/*
	 * NOTE(review): the old "extra metadata reservation for relocation"
	 * comment here described a field that no longer exists; 'error'
	 * records the creation result instead.
	 */
	int error;
	bool readonly;
	/* Linkage into transaction->pending_snapshots */
	struct list_head list;
};
143 
144 static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
145 					      struct inode *inode)
146 {
147 	spin_lock(&BTRFS_I(inode)->lock);
148 	BTRFS_I(inode)->last_trans = trans->transaction->transid;
149 	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
150 	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;
151 	spin_unlock(&BTRFS_I(inode)->lock);
152 }
153 
154 /*
155  * Make qgroup codes to skip given qgroupid, means the old/new_roots for
156  * qgroup won't contain the qgroupid in it.
157  */
158 static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
159 					 u64 qgroupid)
160 {
161 	struct btrfs_delayed_ref_root *delayed_refs;
162 
163 	delayed_refs = &trans->transaction->delayed_refs;
164 	WARN_ON(delayed_refs->qgroup_to_skip);
165 	delayed_refs->qgroup_to_skip = qgroupid;
166 }
167 
168 static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
169 {
170 	struct btrfs_delayed_ref_root *delayed_refs;
171 
172 	delayed_refs = &trans->transaction->delayed_refs;
173 	WARN_ON(!delayed_refs->qgroup_to_skip);
174 	delayed_refs->qgroup_to_skip = 0;
175 }
176 
/* Transaction handle lifecycle: start/join/attach and release. */
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items,
					int min_factor);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);

/* Root bookkeeping and commit entry points. */
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   int wait_for_unblock);
197 
/*
 * Try to commit transaction asynchronously, so this is safe to call
 * even holding a spinlock.
 *
 * It's done by informing transaction_kthread to commit transaction without
 * waiting for commit interval.
 */
static inline void btrfs_commit_transaction_locksafe(
		struct btrfs_fs_info *fs_info)
{
	/* Flag must be visible before the kthread is woken */
	set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags);
	wake_up_process(fs_info->transaction_kthread);
}
/* Throttling, marked-extent writeback and miscellaneous helpers. */
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
				struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
				struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
		       struct extent_io_tree *dirty_pages);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
/* Drop a reference on a transaction taken via use_count */
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
227 
228 #endif
229