/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/math64.h>
#include <linux/rbtree.h>

#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))

/*
 * Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
 */
#define ENUM_BIT(name)                                  \
	__ ## name ## _BIT,                             \
	name = (1U << __ ## name ## _BIT),              \
	__ ## name ## _SEQ = __ ## name ## _BIT

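/*
 * Illustrative sketch only (the flag names below are hypothetical, not part
 * of this header): ENUM_BIT() is meant to be used inside an enum, where each
 * named flag gets the next free bit, e.g.
 *
 *	enum {
 *		ENUM_BIT(MY_FLAG_A),
 *		ENUM_BIT(MY_FLAG_B),
 *		ENUM_BIT(MY_FLAG_C),
 *	};
 *
 * expands so that MY_FLAG_A == 0x1, MY_FLAG_B == 0x2 and MY_FLAG_C == 0x4.
 */
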
static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb barrier, see the comments for
	 * waitqueue_active for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for conditional wakeup where the barrier required for
	 * waitqueue_active is implied by some of the preceding code, e.g. one
	 * of the atomic operations (atomic_dec_return, ...), or an
	 * unlock/lock sequence, etc.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}
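
/*
 * Illustrative sketch only (the counter and wait queue names are
 * hypothetical): a typical caller of cond_wake_up_nomb() has already done a
 * fully ordered atomic operation before checking for sleepers, e.g.
 *
 *	if (atomic_dec_return(&hypothetical_pending) == 0)
 *		cond_wake_up_nomb(&hypothetical_wait);
 *
 * where atomic_dec_return() provides the full barrier that
 * waitqueue_active() would otherwise require.
 */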

static inline u64 mult_perc(u64 num, u32 percent)
{
	return div_u64(num * percent, 100);
}
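
/*
 * For example (illustrative values only): mult_perc(4096, 75) == 3072, i.e.
 * 75% of 4096. Note that num * percent is computed in 64 bits, so very large
 * @num values can overflow before the division.
 */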

/* Copy of is_power_of_2() that is 64-bit safe. */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}

/*
 * Simple bytenr-based rb_tree related structures.
 *
 * Any structure that wants to use bytenr as its single search index should
 * start with these members.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};
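
/*
 * Illustrative sketch only (struct my_node is hypothetical): a user embeds
 * the two members at the start of its own structure, searches by bytenr and
 * converts the returned rb_node back with rb_entry():
 *
 *	struct my_node {
 *		struct rb_node rb_node;
 *		u64 bytenr;
 *		... private fields follow ...
 *	};
 *
 *	struct rb_node *node = rb_simple_search(&some_root, bytenr);
 *	struct my_node *found = node ? rb_entry(node, struct my_node, rb_node)
 *				     : NULL;
 */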

static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Search @root for an entry that starts at or comes after @bytenr.
 *
 * @root:	the root to search.
 * @bytenr:	bytenr to search from.
 *
 * Return the rb_node that starts at or after @bytenr.  If there is no entry
 * at or after @bytenr, return NULL.
 */
static inline struct rb_node *rb_simple_search_first(struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *node = root->rb_node, *ret = NULL;
	struct rb_simple_node *entry, *ret_entry = NULL;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr) {
			if (!ret || entry->bytenr < ret_entry->bytenr) {
				ret = node;
				ret_entry = entry;
			}

			node = node->rb_left;
		} else if (bytenr > entry->bytenr) {
			node = node->rb_right;
		} else {
			return node;
		}
	}

	return ret;
}
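
/*
 * For example (illustrative values only): with entries at bytenr 100 and 200,
 * rb_simple_search_first(root, 150) returns the node with bytenr 200, while
 * rb_simple_search_first(root, 250) returns NULL.
 */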

static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_simple_node *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
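
/*
 * Illustrative sketch only (struct my_node and the variable names are
 * hypothetical): rb_simple_insert() returns NULL on success, or the existing
 * node on a bytenr collision, so callers typically check the return value:
 *
 *	struct rb_node *exist;
 *
 *	exist = rb_simple_insert(&some_root, new->bytenr, &new->rb_node);
 *	if (exist)
 *		... handle the already-present entry (e.g. free @new) ...
 */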

static inline bool bitmap_test_range_all_set(const unsigned long *addr,
					     unsigned long start,
					     unsigned long nbits)
{
	unsigned long found_zero;

	found_zero = find_next_zero_bit(addr, start + nbits, start);
	return (found_zero == start + nbits);
}

static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
					      unsigned long start,
					      unsigned long nbits)
{
	unsigned long found_set;

	found_set = find_next_bit(addr, start + nbits, start);
	return (found_set == start + nbits);
}
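
/*
 * For example (illustrative values only): with a bitmap whose bits 2..5 are
 * set, bitmap_test_range_all_set(map, 2, 4) is true, while
 * bitmap_test_range_all_set(map, 2, 5) and bitmap_test_range_all_zero(map, 2, 4)
 * are both false. The checked range is [start, start + nbits).
 */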

#endif