/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures, it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements provide release order, such that all the prior loads and
 * stores will be issued before. They also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */
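
/*
 * A minimal usage sketch (hypothetical 'struct foo', 'foo_lock' and
 * 'foo_list'; not part of this file). The lock acquire that makes the object
 * reachable provides all the ordering the relaxed increment needs:
 *
 *	struct foo {
 *		refcount_t ref;
 *		struct list_head node;
 *	};
 *
 *	spin_lock(&foo_lock);
 *	f = list_first_entry_or_null(&foo_list, struct foo, node);
 *	if (f)
 *		refcount_inc(&f->ref);	// relaxed; the lock ordered the lookup
 *	spin_unlock(&foo_lock);
 */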

#include <linux/refcount.h>
#include <linux/bug.h>

/*
 * Like refcount_inc_not_zero() below, but adds i instead of 1; returns false
 * and leaves the count untouched when it is 0.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;
		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL_GPL(refcount_add_not_zero);

void refcount_add(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_add);
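
/*
 * Example sketch: taking several references in one go, e.g. when handing a
 * (hypothetical) object out to 'n' new users at once; the not_zero variant
 * lets a lockless caller back off if the object is already dying:
 *
 *	if (!refcount_add_not_zero(n, &f->ref))
 *		return -ENOENT;		// count was 0; object is going away
 */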

/*
 * Similar to atomic_inc_not_zero(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 */
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

		old = atomic_cmpxchg_relaxed(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL_GPL(refcount_inc_not_zero);
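
/*
 * Typical lockless lookup sketch (hypothetical 'foo_cache' pointer published
 * with RCU). rcu_read_lock() keeps the object memory stable and the dependent
 * load provides the ordering, exactly as the comment above assumes:
 *
 *	rcu_read_lock();
 *	f = rcu_dereference(foo_cache);
 *	if (f && !refcount_inc_not_zero(&f->ref))
 *		f = NULL;		// lost the race against the final put
 *	rcu_read_unlock();
 */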

/*
 * Similar to atomic_inc(), will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object. Will WARN when this is not so.
 */
void refcount_inc(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL_GPL(refcount_inc);
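
/*
 * Sketch: taking an additional reference while one is already held, e.g.
 * when storing the pointer in a second (hypothetical) place:
 *
 *	refcount_inc(&f->ref);
 *	bar->foo = f;
 */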

/*
 * Similar to atomic_sub_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX; returns true when the count reaches
 * zero.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"))
			return false;

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return !new;
}
EXPORT_SYMBOL_GPL(refcount_sub_and_test);

/*
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
EXPORT_SYMBOL_GPL(refcount_dec_and_test);
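
/*
 * The canonical put side (hypothetical 'foo_put'; kfree() stands in for
 * whatever release path the object really uses). The release ordering above
 * makes all prior accesses to *f visible before the free:
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_test(&f->ref))
 *			kfree(f);
 *	}
 */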

/*
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL_GPL(refcount_dec);

/*
 * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and a
 * control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 */
bool refcount_dec_if_one(refcount_t *r)
{
	return atomic_cmpxchg_release(&r->refs, 1, 0) == 1;
}
EXPORT_SYMBOL_GPL(refcount_dec_if_one);
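
/*
 * Try-delete sketch (hypothetical 'foo_try_delete'): tear the object down
 * only when we hold the very last reference, otherwise leave it untouched:
 *
 *	bool foo_try_delete(struct foo *f)
 *	{
 *		if (!refcount_dec_if_one(&f->ref))
 *			return false;	// other references remain
 *		kfree(f);
 *		return true;
 *	}
 */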

/*
 * No atomic_t counterpart, it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int old, new, val = atomic_read(&r->refs);

	for (;;) {
		if (unlikely(val == UINT_MAX))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"))
			return true;

		old = atomic_cmpxchg_release(&r->refs, val, new);
		if (old == val)
			break;

		val = old;
	}

	return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_not_one);

/*
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX. Returns true, with the mutex held,
 * when the count reaches zero; returns false (without the lock) otherwise.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_and_mutex_lock);

/*
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX. Returns true, with the spinlock held,
 * when the count reaches zero; returns false (without the lock) otherwise.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(refcount_dec_and_lock);
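
/*
 * Sketch: dropping the last reference and unlinking under the (hypothetical)
 * 'foo_lock' that protects the lookup structure, so concurrent lookups can
 * never find a dying object; refcount_dec_and_mutex_lock() is used the same
 * way. Note that on success the lock is held and must be dropped here:
 *
 *	void foo_put_locked(struct foo *f)
 *	{
 *		if (!refcount_dec_and_lock(&f->ref, &foo_lock))
 *			return;
 *		list_del(&f->node);
 *		spin_unlock(&foo_lock);
 *		kfree(f);
 *	}
 */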