/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>


  linux/include/linux/rbtree.h

  To use rbtrees you'll have to implement your own insert and search cores.
  This avoids the need for callbacks, which would hurt performance
  dramatically. It's not the cleanest approach, but it is what it takes to
  get both performance and genericity in C (as opposed to C++). A sketch of
  such cores follows rb_link_node() below.

  See Documentation/core-api/rbtree.rst for documentation and samples.
*/

#ifndef __TOOLS_LINUX_PERF_RBTREE_H
#define __TOOLS_LINUX_PERF_RBTREE_H

#include <linux/kernel.h>
#include <linux/stddef.h>

struct rb_node {
	unsigned long  __rb_parent_color;
	struct rb_node *rb_right;
	struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
    /* The alignment might seem pointless, but allegedly CRIS needs it */

struct rb_root {
	struct rb_node *rb_node;
};

#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))

#define RB_ROOT	(struct rb_root) { NULL, }
#define	rb_entry(ptr, type, member) container_of(ptr, type, member)

#define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)

/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node)  \
	((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node)  \
	((node)->__rb_parent_color = (unsigned long)(node))


extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);


/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);

/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);

static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
				struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	*rb_link = node;
}
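
/*
 * A minimal sketch of the hand-rolled search and insert cores the comment at
 * the top of this file asks for. "struct demo_node" and its "key" field are
 * hypothetical, purely for illustration; they are not part of this API.
 */
struct demo_node {
	unsigned long key;
	struct rb_node rb;
};

static inline struct demo_node *demo_search(struct rb_root *root,
					    unsigned long key)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct demo_node *d = rb_entry(node, struct demo_node, rb);

		if (key < d->key)
			node = node->rb_left;
		else if (key > d->key)
			node = node->rb_right;
		else
			return d;
	}
	return NULL;
}

static inline bool demo_insert(struct rb_root *root, struct demo_node *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct demo_node *d = rb_entry(*link, struct demo_node, rb);

		parent = *link;
		if (new->key < d->key)
			link = &parent->rb_left;
		else if (new->key > d->key)
			link = &parent->rb_right;
		else
			return false;	/* reject duplicate keys */
	}

	/* link the new node in place, then rebalance/recolor */
	rb_link_node(&new->rb, parent, link);
	rb_insert_color(&new->rb, root);
	return true;
}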

#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
	})

/**
 * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
 * given type allowing the backing memory of @pos to be invalidated
 *
 * @pos:	the 'type *' to use as a loop cursor.
 * @n:		another 'type *' to use as temporary storage
 * @root:	'rb_root *' of the rbtree.
 * @field:	the name of the rb_node field within 'type'.
 *
 * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
 * list_for_each_entry_safe() and allows the iteration to continue independent
 * of changes to @pos by the body of the loop.
 *
 * Note, however, that it cannot handle other modifications that re-order the
 * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
 * rb_erase() may rebalance the tree, causing us to miss some nodes.
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
			typeof(*pos), field); 1; }); \
	     pos = n)
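
/*
 * Sketch: tearing down an entire tree using the postorder iterator, reusing
 * the hypothetical struct demo_node from above. Postorder visits children
 * before their parent, so each node may be released as soon as it is
 * visited; @release is a caller-supplied destructor.
 */
static inline void demo_destroy(struct rb_root *root,
				void (*release)(struct demo_node *))
{
	struct demo_node *pos, *n;

	rbtree_postorder_for_each_entry_safe(pos, n, root, rb)
		release(pos);

	*root = RB_ROOT;
}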

static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}
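
/*
 * Sketch: because rb_erase_init() re-marks the node as 'empty' (unlike bare
 * rb_erase()), a later RB_EMPTY_NODE() check can tell whether the node is
 * still in a tree. Assumes the node was initialized with RB_CLEAR_NODE();
 * illustration only.
 */
static inline void demo_remove_if_queued(struct rb_node *n,
					 struct rb_root *root)
{
	if (!RB_EMPTY_NODE(n))	/* only erase nodes actually in the tree */
		rb_erase_init(n, root);
}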

/*
 * Leftmost-cached rbtrees.
 *
 * We do not cache the rightmost node based on footprint
 * size vs number of potential users that could benefit
 * from O(1) rb_last(). It's just not worth it; users that
 * want this feature can always implement the logic
 * explicitly. Furthermore, users that want to cache both
 * pointers may find it a bit asymmetric, but that's ok.
 */
struct rb_root_cached {
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;
};

#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }

/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost

static inline void rb_insert_color_cached(struct rb_node *node,
					  struct rb_root_cached *root,
					  bool leftmost)
{
	if (leftmost)
		root->rb_leftmost = node;
	rb_insert_color(node, &root->rb_root);
}

static inline void rb_erase_cached(struct rb_node *node,
				   struct rb_root_cached *root)
{
	if (root->rb_leftmost == node)
		root->rb_leftmost = rb_next(node);
	rb_erase(node, &root->rb_root);
}

static inline void rb_replace_node_cached(struct rb_node *victim,
					  struct rb_node *new,
					  struct rb_root_cached *root)
{
	if (root->rb_leftmost == victim)
		root->rb_leftmost = new;
	rb_replace_node(victim, new, &root->rb_root);
}
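
/*
 * Sketch: popping the minimum element of a leftmost-cached tree, e.g. for a
 * simple priority queue built on the hypothetical struct demo_node from
 * above. The lookup is O(1) thanks to the cached leftmost pointer; the
 * erase stays O(log n).
 */
static inline struct demo_node *demo_pop_min(struct rb_root_cached *root)
{
	struct rb_node *first = rb_first_cached(root);

	if (!first)
		return NULL;
	rb_erase_cached(first, root);
	return rb_entry(first, struct demo_node, rb);
}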

/*
 * The helper functions below use two operators with three different
 * calling conventions. The operators are related like:
 *
 *	comp(a->key,b) < 0  := less(a,b)
 *	comp(a->key,b) > 0  := less(b,a)
 *	comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
 *
 * If these operators define a partial order on the elements, we make no
 * guarantee on which of the elements matching the key is found. See
 * rb_find().
 *
 * The reason for this is to allow the find() interface without requiring an
 * on-stack dummy object, which might not be feasible due to object size.
 */
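
/*
 * Sketch: a matching less()/comp() pair for the hypothetical struct
 * demo_node, related exactly as described above: demo_cmp() on a node's key
 * returns < 0 precisely when demo_less() holds for that node.
 */
static inline bool demo_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct demo_node, rb)->key <
	       rb_entry(b, struct demo_node, rb)->key;
}

static inline int demo_cmp(const void *key, const struct rb_node *node)
{
	unsigned long k = *(const unsigned long *)key;
	unsigned long nk = rb_entry(node, struct demo_node, rb)->key;

	if (k < nk)
		return -1;
	if (k > nk)
		return 1;
	return 0;
}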

/**
 * rb_add_cached() - insert @node into the leftmost cached tree @tree
 * @node: node to insert
 * @tree: leftmost cached tree to insert @node into
 * @less: operator defining the (partial) node order
 */
static __always_inline void
rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
	      bool (*less)(struct rb_node *, const struct rb_node *))
{
	struct rb_node **link = &tree->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*link) {
		parent = *link;
		if (less(node, parent)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(node, parent, link);
	rb_insert_color_cached(node, tree, leftmost);
}

/**
 * rb_add() - insert @node into @tree
 * @node: node to insert
 * @tree: tree to insert @node into
 * @less: operator defining the (partial) node order
 */
static __always_inline void
rb_add(struct rb_node *node, struct rb_root *tree,
       bool (*less)(struct rb_node *, const struct rb_node *))
{
	struct rb_node **link = &tree->rb_node;
	struct rb_node *parent = NULL;

	while (*link) {
		parent = *link;
		if (less(node, parent))
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}

	rb_link_node(node, parent, link);
	rb_insert_color(node, tree);
}
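
/*
 * Sketch: with demo_less() above, insertion collapses to a one-liner.
 * Unlike demo_insert() near the top of the file, rb_add() admits
 * duplicate keys.
 */
static inline void demo_add(struct rb_root *tree, struct demo_node *node)
{
	rb_add(&node->rb, tree, demo_less);
}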

/**
 * rb_find_add() - find equivalent @node in @tree, or add @node
 * @node: node to look-for / insert
 * @tree: tree to search / modify
 * @cmp: operator defining the node order
 *
 * Returns the rb_node matching @node, or NULL when no match is found and @node
 * is inserted.
 */
static __always_inline struct rb_node *
rb_find_add(struct rb_node *node, struct rb_root *tree,
	    int (*cmp)(struct rb_node *, const struct rb_node *))
{
	struct rb_node **link = &tree->rb_node;
	struct rb_node *parent = NULL;
	int c;

	while (*link) {
		parent = *link;
		c = cmp(node, parent);

		if (c < 0)
			link = &parent->rb_left;
		else if (c > 0)
			link = &parent->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, link);
	rb_insert_color(node, tree);
	return NULL;
}
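
/*
 * Sketch: insert-or-lookup on struct demo_node. demo_node_cmp() is the
 * node-vs-node flavour of demo_cmp() above. demo_get_or_add() returns the
 * already-present node for @node's key, or NULL once @node was inserted.
 */
static inline int demo_node_cmp(struct rb_node *a, const struct rb_node *b)
{
	unsigned long ka = rb_entry(a, struct demo_node, rb)->key;
	unsigned long kb = rb_entry(b, struct demo_node, rb)->key;

	return ka < kb ? -1 : ka > kb ? 1 : 0;
}

static inline struct demo_node *demo_get_or_add(struct rb_root *tree,
						struct demo_node *node)
{
	struct rb_node *old = rb_find_add(&node->rb, tree, demo_node_cmp);

	return old ? rb_entry(old, struct demo_node, rb) : NULL;
}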

/**
 * rb_find() - find @key in tree @tree
 * @key: key to match
 * @tree: tree to search
 * @cmp: operator defining the node order
 *
 * Returns the rb_node matching @key or NULL.
 */
static __always_inline struct rb_node *
rb_find(const void *key, const struct rb_root *tree,
	int (*cmp)(const void *key, const struct rb_node *))
{
	struct rb_node *node = tree->rb_node;

	while (node) {
		int c = cmp(key, node);

		if (c < 0)
			node = node->rb_left;
		else if (c > 0)
			node = node->rb_right;
		else
			return node;
	}

	return NULL;
}
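
/*
 * Sketch: keyed lookup via rb_find() and demo_cmp() from above. Note that
 * only a pointer to the key is needed, not an on-stack dummy object.
 */
static inline struct demo_node *demo_lookup(const struct rb_root *tree,
					    unsigned long key)
{
	struct rb_node *node = rb_find(&key, tree, demo_cmp);

	return node ? rb_entry(node, struct demo_node, rb) : NULL;
}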

/**
 * rb_find_first() - find the first @key in @tree
 * @key: key to match
 * @tree: tree to search
 * @cmp: operator defining node order
 *
 * Returns the leftmost node matching @key, or NULL.
 */
static __always_inline struct rb_node *
rb_find_first(const void *key, const struct rb_root *tree,
	      int (*cmp)(const void *key, const struct rb_node *))
{
	struct rb_node *node = tree->rb_node;
	struct rb_node *match = NULL;

	while (node) {
		int c = cmp(key, node);

		if (c <= 0) {
			if (!c)
				match = node;
			node = node->rb_left;
		} else if (c > 0) {
			node = node->rb_right;
		}
	}

	return match;
}

/**
 * rb_next_match() - find the next node matching @key after @node
 * @key: key to match
 * @node: current matching node
 * @cmp: operator defining node order
 *
 * Returns the next node matching @key, or NULL.
 */
static __always_inline struct rb_node *
rb_next_match(const void *key, struct rb_node *node,
	      int (*cmp)(const void *key, const struct rb_node *))
{
	node = rb_next(node);
	if (node && cmp(key, node))
		node = NULL;
	return node;
}

/**
 * rb_for_each() - iterates over all nodes matching @key, in order
 * @node: iterator
 * @key: key to match
 * @tree: tree to search
 * @cmp: operator defining node order
 */
#define rb_for_each(node, key, tree, cmp) \
	for ((node) = rb_find_first((key), (tree), (cmp)); \
	     (node); (node) = rb_next_match((key), (node), (cmp)))
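
/*
 * Sketch: counting every node that matches @key when the tree may contain
 * duplicates (e.g. inserted via rb_add(), which does not reject equal keys).
 */
static inline unsigned int demo_count_key(struct rb_root *tree,
					  unsigned long key)
{
	struct rb_node *node;
	unsigned int count = 0;

	rb_for_each(node, &key, tree, demo_cmp)
		count++;

	return count;
}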

#endif	/* __TOOLS_LINUX_PERF_RBTREE_H */