// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO AG
 * written by Arne Jansen <sensille@gmx.net>
 */

#include <linux/slab.h>
#include "messages.h"
#include "ulist.h"
#include "ctree.h"

/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports are adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 * 	for (all child nodes n in elem)
 *		ulist_add(ulist, n);
 *	do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration which could be done elegantly
 * recursively, but is not possible due to kernel stack limitations. The
 * loop would be similar to the above.
 */

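/*
 * A more concrete sketch of the pattern above, assuming a caller that walks
 * a set of 64 bit node ids via a hypothetical get_children() helper (not
 * part of this file). Checking the return value of ulist_add() is what real
 * callers need on top of the pseudo-code:
 *
 *	struct ulist *seen;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *elem;
 *	int ret;
 *
 *	seen = ulist_alloc(GFP_KERNEL);
 *	if (!seen)
 *		return -ENOMEM;
 *	ret = ulist_add(seen, root_id, 0, GFP_KERNEL);
 *	if (ret < 0)
 *		goto out;
 *	ULIST_ITER_INIT(&uiter);
 *	while ((elem = ulist_next(seen, &uiter))) {
 *		for each child id c returned by get_children(elem->val):
 *			ret = ulist_add(seen, c, 0, GFP_KERNEL);
 *			if (ret < 0)
 *				goto out;
 *		// process elem->val here
 *	}
 * out:
 *	ulist_free(seen);
 */
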
/*
 * Freshly initialize a ulist.
 *
 * @ulist:	the ulist to initialize
 *
 * Note: don't use this function to init an already used ulist, use
 * ulist_reinit instead.
 */
void ulist_init(struct ulist *ulist)
{
	INIT_LIST_HEAD(&ulist->nodes);
	ulist->root = RB_ROOT;
	ulist->nnodes = 0;
}

/*
 * Free up additionally allocated memory for the ulist.
 *
 * @ulist:	the ulist from which to free the additional memory
 *
 * This is useful in cases where the base 'struct ulist' has been statically
 * allocated.
 */
void ulist_release(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_node *next;

	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
		kfree(node);
	}
	ulist->root = RB_ROOT;
	INIT_LIST_HEAD(&ulist->nodes);
}

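/*
 * Minimal sketch of the embedded/on-stack pattern that ulist_release() exists
 * for (the names below are illustrative, not taken from a real caller):
 *
 *	struct ulist scratch;
 *
 *	ulist_init(&scratch);
 *	... ulist_add(&scratch, val, aux, GFP_NOFS) ...
 *	ulist_release(&scratch);	// frees the nodes, not &scratch itself
 */
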
/*
 * Prepare a ulist for reuse.
 *
 * @ulist:	ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_release(ulist);
	ulist_init(ulist);
}

/*
 * Dynamically allocate a ulist.
 *
 * @gfp_mask:	allocation flags for the base allocation
 *
 * The allocated ulist will be returned in an initialized state.
 */
struct ulist *ulist_alloc(gfp_t gfp_mask)
{
	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);

	if (!ulist)
		return NULL;

	ulist_init(ulist);

	return ulist;
}

/*
 * Free dynamically allocated ulist.
 *
 * @ulist:	ulist to free
 *
 * It is not necessary to call ulist_release before.
 */
void ulist_free(struct ulist *ulist)
{
	if (!ulist)
		return;
	ulist_release(ulist);
	kfree(ulist);
}

static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
	struct rb_node *n = ulist->root.rb_node;
	struct ulist_node *u = NULL;

	while (n) {
		u = rb_entry(n, struct ulist_node, rb_node);
		if (u->val < val)
			n = n->rb_right;
		else if (u->val > val)
			n = n->rb_left;
		else
			return u;
	}
	return NULL;
}

static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
{
	rb_erase(&node->rb_node, &ulist->root);
	list_del(&node->list);
	kfree(node);
	BUG_ON(ulist->nnodes == 0);
	ulist->nnodes--;
}

static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
	struct rb_node **p = &ulist->root.rb_node;
	struct rb_node *parent = NULL;
	struct ulist_node *cur = NULL;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct ulist_node, rb_node);

		if (cur->val < ins->val)
			p = &(*p)->rb_right;
		else if (cur->val > ins->val)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}
	rb_link_node(&ins->rb_node, parent, p);
	rb_insert_color(&ins->rb_node, &ulist->root);
	return 0;
}

/*
 * Add an element to the ulist.
 *
 * @ulist:	ulist to add the element to
 * @val:	value to add to ulist
 * @aux:	auxiliary value to store along with val
 * @gfp_mask:	flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks, write
 * locking is needed.
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered.
 */
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}

int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
		    u64 *old_aux, gfp_t gfp_mask)
{
	int ret;
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	if (node) {
		if (old_aux)
			*old_aux = node->aux;
		return 0;
	}
	node = kmalloc(sizeof(*node), gfp_mask);
	if (!node)
		return -ENOMEM;

	node->val = val;
	node->aux = aux;

	ret = ulist_rbtree_insert(ulist, node);
	ASSERT(!ret);
	list_add_tail(&node->list, &ulist->nodes);
	ulist->nnodes++;

	return 1;
}

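/*
 * Illustrative use of the return convention and of @old_aux. The value names
 * and the merge_refs() helper below are made up for the example, not taken
 * from a real user of this API:
 *
 *	u64 old_ref;
 *	int ret;
 *
 *	ret = ulist_add_merge(ulist, bytenr, new_ref, &old_ref, GFP_NOFS);
 *	if (ret < 0)
 *		return ret;			// -ENOMEM, ulist unchanged
 *	if (ret == 0)
 *		merge_refs(old_ref, new_ref);	// bytenr was already present
 *	// ret == 1: bytenr was inserted with new_ref stored as its aux
 */
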
/*
 * Delete one node from the ulist.
 *
 * @ulist:	ulist to remove node from
 * @val:	value to delete
 * @aux:	aux to delete
 *
 * The deletion will only be done when *BOTH* val and aux match.
 * Return 0 for a successful delete.
 * Return > 0 for not found.
 */
int ulist_del(struct ulist *ulist, u64 val, u64 aux)
{
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	/* Not found */
	if (!node)
		return 1;

	if (node->aux != aux)
		return 1;

	/* Found and delete */
	ulist_rbtree_erase(ulist, node);
	return 0;
}

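/*
 * Example of the (val, aux) matching rule, with made-up values:
 *
 *	ulist_add(ulist, 4096, 1, GFP_NOFS);
 *	ulist_del(ulist, 4096, 2);	// returns 1, nothing is removed
 *	ulist_del(ulist, 4096, 1);	// returns 0, the node is removed
 */
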
/*
 * Iterate the ulist.
 *
 * @ulist:	ulist to iterate
 * @uiter:	iterator variable, initialized with ULIST_ITER_INIT(&iterator)
 *
 * Note: locking must be provided by the caller. In case of rwlocks, only read
 * locking is needed.
 *
 * This function is used to iterate a ulist.
 * It returns the next element from the ulist or %NULL when the
 * end is reached. No guarantee is made with respect to the order in which
 * the elements are returned. They might neither be returned in order of
 * addition nor in ascending order.
 * It is allowed to call ulist_add during an enumeration. Newly added items
 * are guaranteed to show up in the running enumeration.
 */
struct ulist_node *ulist_next(const struct ulist *ulist, struct ulist_iterator *uiter)
{
	struct ulist_node *node;

	if (list_empty(&ulist->nodes))
		return NULL;
	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
		return NULL;
	if (uiter->cur_list) {
		uiter->cur_list = uiter->cur_list->next;
	} else {
		uiter->cur_list = ulist->nodes.next;
	}
	node = list_entry(uiter->cur_list, struct ulist_node, list);
	return node;
}