/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
#ifndef FREELIST_H
#define FREELIST_H

#include <linux/atomic.h>

/*
 * Copyright: cameron@moodycamel.com
 *
 * A simple CAS-based lock-free free list. Not the fastest thing in the world
 * under heavy contention, but simple and correct (assuming nodes are never
 * freed until after the free list is destroyed), and fairly speedy under low
 * contention.
 *
 * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
 */

struct freelist_node {
	atomic_t		refs;
	struct freelist_node	*next;
};

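/*
 * The list head is a single pointer to the first node. An empty list has
 * head == NULL, so a zero-initialized freelist_head is ready for use.
 */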
struct freelist_head {
	struct freelist_node	*head;
};

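/*
 * The refs field of each node packs two things: a reference count in the
 * low 31 bits (REFS_MASK), covering the free list's own reference plus any
 * threads currently inspecting the node in freelist_try_get(), and the
 * REFS_ON_FREELIST flag in the top bit, marking a node that should be put
 * back on the free list once its reference count drops to zero.
 */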
#define REFS_ON_FREELIST 0x80000000
#define REFS_MASK	 0x7FFFFFFF

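/*
 * Illustrative usage sketch (an assumption about typical callers, not part
 * of this API): users embed a freelist_node in their own object, start with
 * refs zeroed (e.g. via kzalloc()), since freelist_add() only performs the
 * actual insertion once the count is zero, and recover the object with
 * container_of(). All names below (my_object, my_pool, my_get, my_put) are
 * hypothetical.
 *
 *	struct my_object {
 *		struct freelist_node	node;
 *		...payload fields...
 *	};
 *
 *	static struct freelist_head my_pool;	(zeroed head == empty list)
 *
 *	static void my_put(struct my_object *obj)
 *	{
 *		freelist_add(&obj->node, &my_pool);
 *	}
 *
 *	static struct my_object *my_get(void)
 *	{
 *		struct freelist_node *n = freelist_try_get(&my_pool);
 *
 *		return n ? container_of(n, struct my_object, node) : NULL;
 *	}
 */
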
static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
{
	/*
	 * Since the refcount is zero, and nobody can increase it once it's
	 * zero (except us, and we run only one copy of this method per node at
	 * a time, i.e. the single thread case), then we know we can safely
	 * change the next pointer of the node; however, once the refcount is
	 * back above zero, then other threads could increase it (happens under
	 * heavy contention, when the refcount goes to zero in between a load
	 * and a refcount increment of a node in try_get, then back up to
	 * something non-zero, then the refcount increment is done by the other
	 * thread) -- so if the CAS to add the node to the actual list fails,
	 * decrease the refcount and leave the add operation to the next thread
	 * that puts the refcount back to zero (which could be us, hence the
	 * loop).
	 */
	struct freelist_node *head = READ_ONCE(list->head);

	for (;;) {
		WRITE_ONCE(node->next, head);
		atomic_set_release(&node->refs, 1);

		if (!try_cmpxchg_release(&list->head, &head, node)) {
			/*
			 * Hmm, the add failed, but we can only try again when
			 * the refcount goes back to zero.
			 */
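			/*
			 * A single fetch_add below both re-sets the
			 * should-be-on-freelist flag and drops our reference;
			 * if the old count was 1, ours was the last reference,
			 * so retry the add ourselves.
			 */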
			if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
				continue;
		}
		return;
	}
}

static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
{
	/*
	 * We know that the should-be-on-freelist bit is 0 at this point, so
	 * it's safe to set it using a fetch_add.
	 */
	if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
		/*
		 * Oh look! We were the last ones referencing this node, and we
		 * know we want to add it to the free list, so let's do it!
		 */
		__freelist_add(node, list);
	}
}

static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
{
	struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
	unsigned int refs;

	while (head) {
		prev = head;
		refs = atomic_read(&head->refs);
		if ((refs & REFS_MASK) == 0 ||
		    !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
			head = smp_load_acquire(&list->head);
			continue;
		}

		/*
		 * Good, reference count has been incremented (it wasn't at
		 * zero), which means we can read the next and not worry about
		 * it changing between now and the time we do the CAS.
		 */
		next = READ_ONCE(head->next);
		if (try_cmpxchg_acquire(&list->head, &head, next)) {
			/*
			 * Yay, got the node. This means it was on the list,
			 * which means should-be-on-freelist must be false no
			 * matter the refcount (because nobody else knows it's
			 * been taken off yet, it can't have been put back on).
			 */
			WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);

			/*
			 * Decrease refcount twice, once for our ref, and once
			 * for the list's ref.
			 */
			atomic_fetch_add(-2, &head->refs);

			return head;
		}

		/*
		 * OK, the head must have changed on us, but we still need to decrement
		 * the refcount we increased.
		 */
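		/*
		 * If ours was the last reference and the node has since been
		 * marked should-be-on-freelist, putting it back on the list
		 * is now our job.
		 */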
		refs = atomic_fetch_add(-1, &prev->refs);
		if (refs == REFS_ON_FREELIST + 1)
			__freelist_add(prev, list);
	}

	return NULL;
}

#endif /* FREELIST_H */