xref: /openbmc/linux/fs/btrfs/delayed-ref.h (revision 1887be66)
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#ifndef __DELAYED_REF__
#define __DELAYED_REF__

/* these are the possible values of struct btrfs_delayed_ref->action */
#define BTRFS_ADD_DELAYED_REF    1 /* add one backref to the tree */
#define BTRFS_DROP_DELAYED_REF   2 /* delete one backref from the tree */
#define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */

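/*
 * Illustrative example (a hedged sketch, not taken from any caller): a
 * brand new allocation would be queued as BTRFS_ADD_DELAYED_EXTENT, while
 * adding one more backref to an existing extent would look roughly like:
 *
 *	btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent, ref_root,
 *			      ref_generation, owner_objectid,
 *			      BTRFS_ADD_DELAYED_REF, 0);
 *
 * every argument above is a placeholder for the caller's own values;
 * btrfs_add_delayed_ref() is declared later in this header.
 */
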
struct btrfs_delayed_ref_node {
	struct rb_node rb_node;

	/* the starting bytenr of the extent */
	u64 bytenr;

	/* the parent our backref will point to */
	u64 parent;

	/* the size of the extent */
	u64 num_bytes;

	/* ref count on this data structure */
	atomic_t refs;

	/*
	 * how many refs this entry is adding or deleting.  For
	 * head refs, this may be a negative number because it is keeping
	 * track of the total mods done to the reference count.
	 * For individual refs, this will always be a positive number.
	 *
	 * It may be more than one, since it is possible for a single
	 * parent to have more than one ref on an extent.
	 */
	int ref_mod;

	/* is this node still in the rbtree? */
	unsigned int in_tree:1;
};
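
/*
 * Worked example (illustration only): if two BTRFS_ADD_DELAYED_REF updates
 * and one BTRFS_DROP_DELAYED_REF update are queued against the same extent,
 * each individual ref node carries a positive ref_mod, while the head ref
 * for that extent ends up tracking a net ref_mod of +1.
 */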

/*
 * the head refs are used to hold a lock on a given extent, which allows us
 * to make sure that only one process is running the delayed refs
 * at a time for a single extent.  They also store the sum of all the
 * reference count modifications we've queued up.
 */
struct btrfs_delayed_ref_head {
	struct btrfs_delayed_ref_node node;

	/*
	 * the mutex is held while running the refs, and it is also
	 * held when checking the sum of reference modifications.
	 */
	struct mutex mutex;

	/*
	 * when a new extent is allocated, it is just reserved in memory.
	 * The actual extent isn't inserted into the extent allocation tree
	 * until the delayed ref is processed.  must_insert_reserved is
	 * used to flag a delayed ref so the accounting can be updated
	 * when a full insert is done.
	 *
	 * It is possible the extent will be freed before it is ever
	 * inserted into the extent allocation tree.  In this case
	 * we need to update the in-ram accounting to properly reflect
	 * that the free has happened.
	 */
	unsigned int must_insert_reserved:1;
};
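
/*
 * Illustrative locking sketch (assumed usage, not copied from the .c code):
 * whoever runs or sums the delayed refs for a single extent is expected to
 * hold the head's mutex for the duration, roughly:
 *
 *	mutex_lock(&head->mutex);
 *	... run the queued ref mods, or read the summed ref_mod ...
 *	mutex_unlock(&head->mutex);
 */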

struct btrfs_delayed_ref {
	struct btrfs_delayed_ref_node node;

	/* the root objectid our ref will point to */
	u64 root;

	/* the generation for the backref */
	u64 generation;

	/* owner_objectid of the backref */
	u64 owner_objectid;

	/* operation done by this entry in the rbtree */
	u8 action;

	/*
	 * if pin == 1, when the extent is freed it will be pinned until
	 * transaction commit
	 */
	unsigned int pin:1;
};

struct btrfs_delayed_ref_root {
	struct rb_root root;

	/* this spin lock protects the rbtree and the entries inside */
	spinlock_t lock;

	/*
	 * how many delayed ref updates we've queued, used by the
	 * throttling code
	 */
	unsigned long num_entries;

	/*
	 * set when the tree is flushing before a transaction commit,
	 * used by the throttling code to decide if new updates need
	 * to be run right away
	 */
	int flushing;
};
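
/*
 * Illustrative sketch (assumed usage, not lifted from delayed-ref.c): every
 * walk or modification of the rbtree, and the num_entries bookkeeping, is
 * done under the spinlock, roughly:
 *
 *	spin_lock(&delayed_refs->lock);
 *	... rb_next()/rb_insert_color() etc. on delayed_refs->root ...
 *	delayed_refs->num_entries++;
 *	spin_unlock(&delayed_refs->lock);
 */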

static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	WARN_ON(atomic_read(&ref->refs) == 0);
	if (atomic_dec_and_test(&ref->refs)) {
		WARN_ON(ref->in_tree);
		kfree(ref);
	}
}
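
/*
 * Note that there is no matching "get" helper in this header; callers that
 * need to keep a node alive after dropping delayed_refs->lock presumably
 * take an extra reference with atomic_inc(&ref->refs) and drop it later
 * with btrfs_put_delayed_ref().
 */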
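
/*
 * Queue one delayed reference update against the extent starting at
 * @bytenr and covering @num_bytes.  @action is one of the
 * BTRFS_*_DELAYED_* values defined above, and @pin asks for the extent to
 * stay pinned until transaction commit if this update ends up freeing it.
 * (Summary based on the prototype and the comments in this header.)
 */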
int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
			  u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
			  u64 ref_generation, u64 owner_objectid, int action,
			  int pin);

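/*
 * Lookup helpers: btrfs_find_delayed_ref_head() returns the head entry
 * currently queued for @bytenr (if any), and btrfs_delayed_ref_pending()
 * reports whether any delayed updates are still queued for that extent.
 * (Brief summaries inferred from the prototypes, not from the .c code.)
 */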
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_lock_delayed_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_node *ref,
			   struct btrfs_delayed_ref_head **next_ret);
int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 num_bytes, u32 *refs);
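/*
 * btrfs_update_delayed_ref() queues a change of an existing reference from
 * its original (orig_parent, orig_ref_root, orig_ref_generation) values to
 * the new ones, instead of adding or dropping a reference outright.
 * (A reading of the prototype; the exact semantics live in delayed-ref.c.)
 */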
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			  u64 bytenr, u64 num_bytes, u64 orig_parent,
			  u64 parent, u64 orig_ref_root, u64 ref_root,
			  u64 orig_ref_generation, u64 ref_generation,
			  u64 owner_objectid, int pin);
/*
 * a node might live in a head or a regular ref; this lets you
 * test for the proper type to use.
 */
static inline int btrfs_delayed_ref_is_head(struct btrfs_delayed_ref_node *node)
{
	return node->parent == (u64)-1;
}

/*
 * helper functions to cast a node into its container
 */
static inline struct btrfs_delayed_ref *
btrfs_delayed_node_to_ref(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref, node);
}

static inline struct btrfs_delayed_ref_head *
btrfs_delayed_node_to_head(struct btrfs_delayed_ref_node *node)
{
	WARN_ON(!btrfs_delayed_ref_is_head(node));
	return container_of(node, struct btrfs_delayed_ref_head, node);
}
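
/*
 * Typical dispatch pattern using the helpers above (illustrative sketch):
 *
 *	if (btrfs_delayed_ref_is_head(node)) {
 *		struct btrfs_delayed_ref_head *head;
 *
 *		head = btrfs_delayed_node_to_head(node);
 *		...
 *	} else {
 *		struct btrfs_delayed_ref *ref;
 *
 *		ref = btrfs_delayed_node_to_ref(node);
 *		...
 *	}
 */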
#endif