/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures, it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before; they also provide a control dependency, which
 * will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 */
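
/*
 * Typical usage, as an illustrative sketch (struct foo, foo_get() and
 * foo_put() are hypothetical names, not part of this file):
 *
 *	struct foo {
 *		refcount_t ref;
 *	};
 *
 *	void foo_get(struct foo *f)
 *	{
 *		refcount_inc(&f->ref);
 *	}
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_test(&f->ref))
 *			kfree(f);
 *	}
 */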

#include <linux/refcount.h>
#include <linux/bug.h>

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;	/* saturated, won't move */

		new = val + i;
		if (new < val)		/* overflow, saturate */
			new = UINT_MAX;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_add_not_zero);

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add);
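
/*
 * Illustrative sketch of a batched get (hypothetical names): hand out
 * several references to an already-held object in one step instead of
 * calling refcount_inc() in a loop.
 *
 *	void foo_get_many(struct foo *f, unsigned int n)
 *	{
 *		refcount_add(n, &f->ref);
 *	}
 */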

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;	/* val was UINT_MAX, stay saturated */

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero);
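
/*
 * Illustrative sketch of the lockless lookup pattern this is meant for
 * (hypothetical names; assumes the object is freed via RCU, so its memory
 * stays stable for the duration of the read-side critical section):
 *
 *	struct foo *foo_lookup(struct foo_table *t, unsigned long key)
 *	{
 *		struct foo *f;
 *
 *		rcu_read_lock();
 *		f = radix_tree_lookup(&t->tree, key);
 *		if (f && !refcount_inc_not_zero(&f->ref))
 *			f = NULL;	(raced with the final put)
 *		rcu_read_unlock();
 *
 *		return f;
 *	}
 */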

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc);

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but on underflow it will WARN, return
 * false and ultimately leak the object; it will also fail to decrement when
 * saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return false;	/* saturated, won't move */

		new = val - i;
		if (new > val) {	/* underflow */
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return !new;
}
EXPORT_SYMBOL(refcount_sub_and_test);
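
/*
 * Illustrative sketch (hypothetical names): drop several references in one
 * step, e.g. after a batch of outstanding requests completes.
 *
 *	void foo_put_many(struct foo *f, unsigned int n)
 *	{
 *		if (refcount_sub_and_test(n, &f->ref))
 *			kfree(f);
 *	}
 */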

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test(refcount_t *r)
{
	return refcount_sub_and_test(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test);

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), but it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX. It will also WARN if the decrement hits 0, since
 * the final reference should be dropped via refcount_dec_and_test() so that
 * the object can actually be freed.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * There is no atomic_t counterpart; it attempts a 1 -> 0 transition and
 * returns whether it succeeded.
 *
 * Like all decrement operations, it provides release memory ordering and a
 * control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * instead of a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
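
/*
 * Illustrative try-delete sketch (hypothetical names; assumes lookups are
 * serialized against deletion, e.g. by a table lock, so that an immediate
 * kfree() is safe): tear the object down only if we hold the last
 * reference, so no new reference can be taken once the 1 -> 0 transition
 * has succeeded.
 *
 *	bool foo_try_delete(struct foo_table *t, struct foo *f)
 *	{
 *		if (!refcount_dec_if_one(&f->ref))
 *			return false;	(someone else still holds a reference)
 *
 *		foo_table_del(t, f);	(hypothetical removal helper)
 *		kfree(f);
 *		return true;
 *	}
 */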

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * There is no atomic_t counterpart; it decrements unless the value is 1, in
 * which case it returns false.
 *
 * This was often open-coded as: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return true;	/* saturated, won't move */

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {	/* underflow */
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), but it will WARN on underflow and
 * fail to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold the mutex if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
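
/*
 * Illustrative sketch (hypothetical names): drop a reference and, if it was
 * the last one, unlink the object from a mutex-protected list before
 * freeing it. The mutex is only taken on the final 1 -> 0 path.
 *
 *	void foo_put(struct foo *f, struct foo_list *l)
 *	{
 *		if (refcount_dec_and_mutex_lock(&f->ref, &l->lock)) {
 *			list_del(&f->node);
 *			mutex_unlock(&l->lock);
 *			kfree(f);
 *		}
 *	}
 */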

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), but it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold the spinlock if able to decrement refcount to 0, false
 *         otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);
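
/*
 * Illustrative sketch (hypothetical names), mirroring the mutex variant
 * above for a spinlock-protected hash table:
 *
 *	void foo_put(struct foo *f, struct foo_cache *c)
 *	{
 *		if (refcount_dec_and_lock(&f->ref, &c->lock)) {
 *			hlist_del(&f->hash_node);
 *			spin_unlock(&c->lock);
 *			kfree(f);
 *		}
 *	}
 */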