1602cbe91SDavid Sterba /* SPDX-License-Identifier: GPL-2.0 */
2602cbe91SDavid Sterba
3602cbe91SDavid Sterba #ifndef BTRFS_MISC_H
4602cbe91SDavid Sterba #define BTRFS_MISC_H
5602cbe91SDavid Sterba
6602cbe91SDavid Sterba #include <linux/sched.h>
7602cbe91SDavid Sterba #include <linux/wait.h>
8cde7417cSKari Argillander #include <linux/math64.h>
9e9a28dc5SQu Wenruo #include <linux/rbtree.h>
10602cbe91SDavid Sterba
11d549ff7bSDavid Sterba /*
12d549ff7bSDavid Sterba * Enumerate bits using enum autoincrement. Define the @name as the n-th bit.
13d549ff7bSDavid Sterba */
/*
 * Expands to three enumerators for @name:
 *   __<name>_BIT - the ordinal position n, taken from the enum's running
 *                  counter
 *   <name>       - the bit mask (1U << n)
 *   __<name>_SEQ - set back to n so the counter resumes at n + 1 for the
 *                  next ENUM_BIT in the same enum
 */
#define ENUM_BIT(name)                                  \
	__ ## name ## _BIT,                             \
	name = (1U << __ ## name ## _BIT),              \
	__ ## name ## _SEQ = __ ## name ## _BIT
18d549ff7bSDavid Sterba
/* Wake up waiters on @wq, but only if there are any. */
static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * wq_has_sleeper() implies the full smp_mb barrier required before
	 * checking for waiters; see the comments for waitqueue_active().
	 */
	if (!wq_has_sleeper(wq))
		return;

	wake_up(wq);
}
28602cbe91SDavid Sterba
/* Conditional wakeup without an extra memory barrier. */
static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Variant of cond_wake_up() for callers whose preceding code already
	 * implies the barrier that waitqueue_active() needs — e.g. a
	 * value-returning atomic (atomic_dec_and_return, ...) or an
	 * unlock/lock sequence.
	 */
	if (!waitqueue_active(wq))
		return;

	wake_up(wq);
}
40602cbe91SDavid Sterba
/* Return @percent percent of @num, i.e. num * percent / 100. */
static inline u64 mult_perc(u64 num, u32 percent)
{
	const u64 scaled = num * percent;

	return div_u64(scaled, 100);
}
4579c8264eSDavid Sterba /* Copy of is_power_of_two that is 64bit safe */
is_power_of_two_u64(u64 n)4679c8264eSDavid Sterba static inline bool is_power_of_two_u64(u64 n)
4779c8264eSDavid Sterba {
4879c8264eSDavid Sterba return n != 0 && (n & (n - 1)) == 0;
4979c8264eSDavid Sterba }
5079c8264eSDavid Sterba
has_single_bit_set(u64 n)5179c8264eSDavid Sterba static inline bool has_single_bit_set(u64 n)
5279c8264eSDavid Sterba {
5379c8264eSDavid Sterba return is_power_of_two_u64(n);
5479c8264eSDavid Sterba }
5579c8264eSDavid Sterba
56e9a28dc5SQu Wenruo /*
57e9a28dc5SQu Wenruo * Simple bytenr based rb_tree relate structures
58e9a28dc5SQu Wenruo *
 * Any structure that wants to use bytenr as its single search index should
 * have these members at the start of the structure.
61e9a28dc5SQu Wenruo */
struct rb_simple_node {
	struct rb_node rb_node;	/* tree linkage */
	u64 bytenr;		/* single search key */
};
66e9a28dc5SQu Wenruo
/* Find the node whose bytenr equals @bytenr, or NULL if there is none. */
static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *cur = root->rb_node;

	while (cur) {
		struct rb_simple_node *e;

		e = rb_entry(cur, struct rb_simple_node, rb_node);
		if (bytenr == e->bytenr)
			return cur;

		cur = (bytenr < e->bytenr) ? cur->rb_left : cur->rb_right;
	}

	return NULL;
}
84e9a28dc5SQu Wenruo
8587c11705SJosef Bacik /*
8687c11705SJosef Bacik * Search @root from an entry that starts or comes after @bytenr.
8787c11705SJosef Bacik *
8887c11705SJosef Bacik * @root: the root to search.
8987c11705SJosef Bacik * @bytenr: bytenr to search from.
9087c11705SJosef Bacik *
 * Return the rb_node that starts at or after @bytenr. If there is no entry
 * at or after @bytenr return NULL.
9387c11705SJosef Bacik */
static inline struct rb_node *rb_simple_search_first(struct rb_root *root,
						     u64 bytenr)
{
	struct rb_node *cur = root->rb_node;
	struct rb_node *best = NULL;
	struct rb_simple_node *best_entry = NULL;

	while (cur) {
		struct rb_simple_node *entry;

		entry = rb_entry(cur, struct rb_simple_node, rb_node);

		/* Exact match wins immediately. */
		if (entry->bytenr == bytenr)
			return cur;

		if (entry->bytenr < bytenr) {
			/* Entirely below the target, only the right subtree
			 * can contain an answer. */
			cur = cur->rb_right;
			continue;
		}

		/* Candidate at or above @bytenr: keep the smallest seen. */
		if (!best || entry->bytenr < best_entry->bytenr) {
			best = cur;
			best_entry = entry;
		}
		cur = cur->rb_left;
	}

	return best;
}
11987c11705SJosef Bacik
/*
 * Insert @node keyed by @bytenr into @root.
 *
 * Return NULL on success, or the already-present node with the same bytenr
 * (in which case @node is not inserted).
 */
static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **link = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*link) {
		struct rb_simple_node *entry;

		parent = *link;
		entry = rb_entry(parent, struct rb_simple_node, rb_node);

		if (bytenr == entry->bytenr)
			return parent;

		link = (bytenr < entry->bytenr) ? &parent->rb_left :
						  &parent->rb_right;
	}

	rb_link_node(node, parent, link);
	rb_insert_color(node, root);
	return NULL;
}
143e9a28dc5SQu Wenruo
bitmap_test_range_all_set(const unsigned long * addr,unsigned long start,unsigned long nbits)144*b5345d6cSNaohiro Aota static inline bool bitmap_test_range_all_set(const unsigned long *addr,
145*b5345d6cSNaohiro Aota unsigned long start,
146*b5345d6cSNaohiro Aota unsigned long nbits)
147*b5345d6cSNaohiro Aota {
148*b5345d6cSNaohiro Aota unsigned long found_zero;
149*b5345d6cSNaohiro Aota
150*b5345d6cSNaohiro Aota found_zero = find_next_zero_bit(addr, start + nbits, start);
151*b5345d6cSNaohiro Aota return (found_zero == start + nbits);
152*b5345d6cSNaohiro Aota }
153*b5345d6cSNaohiro Aota
bitmap_test_range_all_zero(const unsigned long * addr,unsigned long start,unsigned long nbits)154*b5345d6cSNaohiro Aota static inline bool bitmap_test_range_all_zero(const unsigned long *addr,
155*b5345d6cSNaohiro Aota unsigned long start,
156*b5345d6cSNaohiro Aota unsigned long nbits)
157*b5345d6cSNaohiro Aota {
158*b5345d6cSNaohiro Aota unsigned long found_set;
159*b5345d6cSNaohiro Aota
160*b5345d6cSNaohiro Aota found_set = find_next_bit(addr, start + nbits, start);
161*b5345d6cSNaohiro Aota return (found_set == start + nbits);
162*b5345d6cSNaohiro Aota }
163*b5345d6cSNaohiro Aota
164602cbe91SDavid Sterba #endif
165