/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H

#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"
#include "misc.h"

enum btrfs_trans_state {
	TRANS_STATE_RUNNING,
	TRANS_STATE_COMMIT_START,
	TRANS_STATE_COMMIT_DOING,
	TRANS_STATE_UNBLOCKED,
	TRANS_STATE_SUPER_COMMITTED,
	TRANS_STATE_COMPLETED,
	TRANS_STATE_MAX,
};

#define BTRFS_TRANS_HAVE_FREE_BGS	0
#define BTRFS_TRANS_DIRTY_BG_RUN	1
#define BTRFS_TRANS_CACHE_ENOSPC	2

struct btrfs_transaction {
	u64 transid;
	/*
	 * Total number of external writers (USERSPACE/START/ATTACH) in this
	 * transaction. It must be zero before the transaction can be
	 * committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total number of writers in this transaction. It must be zero before
	 * the transaction can end.
	 */
	atomic_t num_writers;
	refcount_t use_count;

	unsigned long flags;

	/* Protected by fs_info->trans_lock; take the lock when changing it. */
	enum btrfs_trans_state state;
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	struct list_head pending_snapshots;
	struct list_head dev_update_list;
	struct list_head switch_commits;
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - this function can only ever be
	 *   run by one of the transaction committers. Refer to the
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction.
	 *
	 * - btrfs_write_dirty_block_groups - this is called by
	 *   commit_cowonly_roots from the transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING).
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort.
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;
	struct extent_io_tree pinned_extents;

	/*
	 * We need to make sure block group deletion doesn't race with free
	 * space cache writeout. This mutex keeps them from stomping on each
	 * other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;

	/*
	 * Number of ordered extents the transaction must wait for before
	 * committing. These are ordered extents started by a fast fsync.
	 */
	atomic_t pending_ordered;
	wait_queue_head_t pending_wait;
};

enum {
	ENUM_BIT(__TRANS_FREEZABLE),
	ENUM_BIT(__TRANS_START),
	ENUM_BIT(__TRANS_ATTACH),
	ENUM_BIT(__TRANS_JOIN),
	ENUM_BIT(__TRANS_JOIN_NOLOCK),
	ENUM_BIT(__TRANS_DUMMY),
	ENUM_BIT(__TRANS_JOIN_NOSTART),
};

#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH		(__TRANS_ATTACH)
#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)

#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)
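
/*
 * Illustrative sketch, not part of this header: TRANS_EXTWRITERS selects the
 * handle types that count as external writers, i.e. the ones tracked in
 * btrfs_transaction::num_extwriters, which must drop to zero before the
 * transaction can be committed. A counting helper and the commit-side wait
 * could look roughly like the following (the helper name is hypothetical):
 *
 *	static void extwriter_inc(struct btrfs_transaction *trans,
 *				  unsigned int type)
 *	{
 *		if (type & TRANS_EXTWRITERS)
 *			atomic_inc(&trans->num_extwriters);
 *	}
 *
 *	wait_event(cur_trans->writer_wait,
 *		   atomic_read(&cur_trans->num_extwriters) == 0);
 */
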
struct btrfs_trans_handle {
	u64 transid;
	u64 bytes_reserved;
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	/* Set by a task that wants to create a snapshot. */
	struct btrfs_pending_snapshot *pending_snapshot;
	refcount_t use_count;
	unsigned int type;
	/*
	 * Error code of a transaction abort, set outside of locks and must be
	 * accessed with READ_ONCE/WRITE_ONCE.
	 */
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool removing_chunk;
	bool reloc_reserved;
	bool in_fsync;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
};

/*
 * The abort status can be changed between calls and is not protected by locks.
 * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's
 * set to a non-zero value it does not change, so the macro should be used in
 * checks but is not necessary for further reads of the value.
 */
#define TRANS_ABORTED(trans)		(unlikely(READ_ONCE((trans)->aborted)))

struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct inode *dir;
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* Block reservation for the operation. */
	struct btrfs_block_rsv block_rsv;
	/* Extra metadata reservation for relocation. */
	int error;
	/* Preallocated anonymous block device number. */
	dev_t anon_dev;
	bool readonly;
	struct list_head list;
};

static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
					      struct btrfs_inode *inode)
{
	spin_lock(&inode->lock);
	inode->last_trans = trans->transaction->transid;
	inode->last_sub_trans = inode->root->log_transid;
	inode->last_log_commit = inode->last_sub_trans - 1;
	spin_unlock(&inode->lock);
}

/*
 * Make the qgroup code skip the given qgroupid, which means the old_roots and
 * new_roots for the qgroup won't contain that qgroupid.
 */
static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(!delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = 0;
}

bool __cold abort_should_print_stack(int errno);

/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, that way the exact stack trace is reported for some errors.
 */
#define btrfs_abort_transaction(trans, errno)				\
do {									\
	bool first = false;						\
	/* Report first abort since mount */				\
	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,		\
			&((trans)->fs_info->fs_state))) {		\
		first = true;						\
		if (WARN(abort_should_print_stack(errno),		\
			KERN_ERR					\
			"BTRFS: Transaction aborted (error %d)\n",	\
			(errno))) {					\
			/* Stack trace printed. */			\
		} else {						\
			btrfs_debug((trans)->fs_info,			\
				    "Transaction aborted (error %d)",	\
				    (errno));				\
		}							\
	}								\
	__btrfs_abort_transaction((trans), __func__,			\
				  __LINE__, (errno), first);		\
} while (0)
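
/*
 * Example error handling pattern (illustrative only; modify_tree() is a
 * hypothetical helper): abort right where the failure is detected so the
 * reported stack trace points at the first error, then unwind through the
 * normal btrfs_end_transaction() path. Later code that must not run against
 * an aborted transaction can cheaply test for it with TRANS_ABORTED():
 *
 *	ret = modify_tree(trans, root);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		goto out;
 *	}
 *
 *	if (TRANS_ABORTED(trans))
 *		return;
 */
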
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
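
/*
 * Typical caller lifecycle (illustrative sketch only; do_tree_updates() is a
 * hypothetical helper and the item count is simplified): reserve space when
 * starting the handle, abort on failure, and finish with either
 * btrfs_end_transaction() or btrfs_commit_transaction():
 *
 *	struct btrfs_trans_handle *trans;
 *	int ret;
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *
 *	ret = do_tree_updates(trans);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		btrfs_end_transaction(trans);
 *		return ret;
 *	}
 *
 *	return btrfs_commit_transaction(trans);
 */
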
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
				      const char *function,
				      unsigned int line, int errno,
				      bool first_hit);

int __init btrfs_transaction_init(void);
void __cold btrfs_transaction_exit(void);

#endif