/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#ifndef BTRFS_TRANSACTION_H
#define BTRFS_TRANSACTION_H

#include <linux/refcount.h>
#include "btrfs_inode.h"
#include "delayed-ref.h"
#include "ctree.h"
#include "misc.h"

/*
 * Signal that a direct IO write is in progress, to avoid deadlock for sync
 * direct IO writes when fsync is called during the direct IO write path.
 */
#define BTRFS_TRANS_DIO_WRITE_STUB	((void *) 1)

/* Radix-tree tag for roots that are part of the transaction. */
#define BTRFS_ROOT_TRANS_TAG		0

enum btrfs_trans_state {
	TRANS_STATE_RUNNING,
	TRANS_STATE_COMMIT_PREP,
	TRANS_STATE_COMMIT_START,
	TRANS_STATE_COMMIT_DOING,
	TRANS_STATE_UNBLOCKED,
	TRANS_STATE_SUPER_COMMITTED,
	TRANS_STATE_COMPLETED,
	TRANS_STATE_MAX,
};

#define BTRFS_TRANS_HAVE_FREE_BGS	0
#define BTRFS_TRANS_DIRTY_BG_RUN	1
#define BTRFS_TRANS_CACHE_ENOSPC	2

struct btrfs_transaction {
	u64 transid;
	/*
	 * Total number of external writers (USERSPACE/START/ATTACH) in this
	 * transaction. It must be zero before the transaction can be
	 * committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total number of writers in this transaction. It must be zero before
	 * the transaction can end.
	 */
	atomic_t num_writers;
	refcount_t use_count;

	unsigned long flags;

	/* Protected by fs_info->trans_lock when we want to change it. */
	enum btrfs_trans_state state;
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	struct list_head pending_snapshots;
	struct list_head dev_update_list;
	struct list_head switch_commits;
	struct list_head dirty_bgs;

	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - This function can only ever be
	 *   run by one of the transaction committers. Refer to
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction.
	 *
	 * - btrfs_write_dirty_block_groups - this is called by
	 *   commit_cowonly_roots from the transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING).
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort.
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;
	struct extent_io_tree pinned_extents;

	/*
	 * We need to make sure block group deletion doesn't race with free
	 * space cache writeout. This mutex keeps them from stomping on each
	 * other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;

	/*
	 * Number of ordered extents the transaction must wait for before
	 * committing. These are ordered extents started by a fast fsync.
	 */
	atomic_t pending_ordered;
	wait_queue_head_t pending_wait;
};

enum {
	ENUM_BIT(__TRANS_FREEZABLE),
	ENUM_BIT(__TRANS_START),
	ENUM_BIT(__TRANS_ATTACH),
	ENUM_BIT(__TRANS_JOIN),
	ENUM_BIT(__TRANS_JOIN_NOLOCK),
	ENUM_BIT(__TRANS_DUMMY),
	ENUM_BIT(__TRANS_JOIN_NOSTART),
};

#define TRANS_START		(__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH		(__TRANS_ATTACH)
#define TRANS_JOIN		(__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK	(__TRANS_JOIN_NOLOCK)
#define TRANS_JOIN_NOSTART	(__TRANS_JOIN_NOSTART)

#define TRANS_EXTWRITERS	(__TRANS_START | __TRANS_ATTACH)

struct btrfs_trans_handle {
	u64 transid;
	u64 bytes_reserved;
	u64 chunk_bytes_reserved;
	unsigned long delayed_ref_updates;
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	/* Set by a task that wants to create a snapshot. */
	struct btrfs_pending_snapshot *pending_snapshot;
	refcount_t use_count;
	unsigned int type;
	/*
	 * Error code of a transaction abort, set outside of locks and must be
	 * accessed with READ_ONCE/WRITE_ONCE.
	 */
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool removing_chunk;
	bool reloc_reserved;
	bool in_fsync;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
};

/*
 * The abort status can be changed between calls and is not protected by locks.
 * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's
 * set to a non-zero value it does not change, so the macro should be used in
 * checks but is not necessary for further reads of the value.
 */
#define TRANS_ABORTED(trans)	(unlikely(READ_ONCE((trans)->aborted)))
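/*
 * Minimal usage sketch of TRANS_ABORTED(), for illustration only. It assumes
 * a hypothetical caller that already holds a 'trans' handle and has an 'out'
 * error path; real call sites vary:
 *
 *	if (TRANS_ABORTED(trans))
 *		goto out;
 */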
struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct inode *dir;
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* block reservation for the operation */
	struct btrfs_block_rsv block_rsv;
	/* extra metadata reservation for relocation */
	int error;
	/* Preallocated anonymous block device number */
	dev_t anon_dev;
	bool readonly;
	struct list_head list;
};

static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
					      struct btrfs_inode *inode)
{
	spin_lock(&inode->lock);
	inode->last_trans = trans->transaction->transid;
	inode->last_sub_trans = inode->root->log_transid;
	inode->last_log_commit = inode->last_sub_trans - 1;
	spin_unlock(&inode->lock);
}

/*
 * Make the qgroup code skip the given qgroupid, which means the old/new_roots
 * for the qgroup won't contain that qgroupid.
 */
static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = qgroupid;
}

static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	WARN_ON(!delayed_refs->qgroup_to_skip);
	delayed_refs->qgroup_to_skip = 0;
}

bool __cold abort_should_print_stack(int errno);

/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, so that the exact stack trace is reported for some errors.
 */
#define btrfs_abort_transaction(trans, errno)				\
do {									\
	bool first = false;						\
	/* Report first abort since mount */				\
	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,		\
			&((trans)->fs_info->fs_state))) {		\
		first = true;						\
		if (WARN(abort_should_print_stack(errno),		\
			KERN_ERR					\
			"BTRFS: Transaction aborted (error %d)\n",	\
			(errno))) {					\
			/* Stack trace printed. */			\
		} else {						\
			btrfs_err((trans)->fs_info,			\
				  "Transaction aborted (error %d)",	\
				  (errno));				\
		}							\
	}								\
	__btrfs_abort_transaction((trans), __func__,			\
				  __LINE__, (errno), first);		\
} while (0)

int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
					struct btrfs_root *root,
					unsigned int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
					struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);

void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
void btrfs_maybe_wake_unfinished_drop(struct btrfs_fs_info *fs_info);
int btrfs_clean_one_deleted_snapshot(struct btrfs_fs_info *fs_info);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
void btrfs_commit_transaction_async(struct btrfs_trans_handle *trans);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
			       struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
void btrfs_add_dropped_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
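/*
 * Minimal usage sketch of the transaction API declared above, for illustration
 * only. do_tree_mod() is a hypothetical helper that modifies metadata under
 * the handle; real call sites vary:
 *
 *	struct btrfs_trans_handle *trans;
 *	int ret;
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *
 *	ret = do_tree_mod(trans, root);
 *	if (ret) {
 *		btrfs_abort_transaction(trans, ret);
 *		btrfs_end_transaction(trans);
 *		return ret;
 *	}
 *
 *	return btrfs_end_transaction(trans);
 */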
void __cold __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
				      const char *function,
				      unsigned int line, int errno, bool first_hit);

int __init btrfs_transaction_init(void);
void __cold btrfs_transaction_exit(void);

#endif