/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RCUREF_H
#define _LINUX_RCUREF_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/limits.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

#define RCUREF_ONEREF		0x00000000U
#define RCUREF_MAXREF		0x7FFFFFFFU
#define RCUREF_SATURATED	0xA0000000U
#define RCUREF_RELEASED		0xC0000000U
#define RCUREF_DEAD		0xE0000000U
#define RCUREF_NOREF		0xFFFFFFFFU

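/*
 * Illustrative note (not part of the original header): the counter is
 * biased by one, so a single held reference is stored as RCUREF_ONEREF
 * (0x00000000) and rcuref_read() reports the stored value plus one.
 * Assuming a count initialized to one reference:
 *
 *	rcuref_init(&ref, 1);	// counter 0x00000000, rcuref_read() == 1
 *	rcuref_get(&ref);	// counter 0x00000001, rcuref_read() == 2
 *	rcuref_put(&ref);	// counter 0x00000000, returns false
 *	rcuref_put(&ref);	// drops below zero, the slowpath marks the
 *				// count RCUREF_DEAD and returns true
 *
 * Counter values above RCUREF_MAXREF fall into the saturation and dead
 * zones handled by the slowpaths; see lib/rcuref.c for the details.
 */
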
/**
 * rcuref_init - Initialize a rcuref reference count with the given reference count
 * @ref: Pointer to the reference count
 * @cnt: The initial reference count, typically '1'
 */
static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
{
	atomic_set(&ref->refcnt, cnt - 1);
}

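/*
 * Usage sketch (illustrative only; 'struct foo' and foo_alloc() are
 * made-up examples, not part of this header): an RCU protected object
 * typically embeds the rcuref_t and initializes it with one reference
 * before the object is published.
 *
 *	struct foo {
 *		rcuref_t	ref;
 *		struct rcu_head	rcu;
 *		unsigned long	key;
 *	};
 *
 *	struct foo *foo_alloc(unsigned long key)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *
 *		if (f) {
 *			f->key = key;
 *			rcuref_init(&f->ref, 1);
 *		}
 *		return f;
 *	}
 */
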
/**
 * rcuref_read - Read the number of held reference counts of a rcuref
 * @ref: Pointer to the reference count
 *
 * Return: The number of held references (0 ... N)
 */
static inline unsigned int rcuref_read(rcuref_t *ref)
{
	unsigned int c = atomic_read(&ref->refcnt);

	/* Return 0 if within the DEAD zone. */
	return c >= RCUREF_RELEASED ? 0 : c + 1;
}

extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);

/**
 * rcuref_get - Acquire one reference on a rcuref reference count
 * @ref: Pointer to the reference count
 *
 * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See documentation in lib/rcuref.c
 *
 * Return:
 *	False if the attempt to acquire a reference failed. This happens
 *	when the last reference has been put already.
 *
 *	True if a reference was successfully acquired
 */
static inline __must_check bool rcuref_get(rcuref_t *ref)
{
	/*
	 * Unconditionally increase the reference count. The saturation and
	 * dead zones provide enough tolerance for this.
	 */
	if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt)))
		return true;

	/* Handle the cases inside the saturation and dead zones */
	return rcuref_get_slowpath(ref);
}

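/*
 * Usage sketch (illustrative only; foo_get() and foo_find() are made-up
 * names): the object memory must be kept stable while rcuref_get() runs,
 * e.g. by looking the object up under rcu_read_lock() and only handing
 * it out when a reference could actually be acquired.
 *
 *	struct foo *foo_get(unsigned long key)
 *	{
 *		struct foo *f;
 *
 *		rcu_read_lock();
 *		f = foo_find(key);		// RCU protected lookup
 *		if (f && !rcuref_get(&f->ref))
 *			f = NULL;		// last reference was already put
 *		rcu_read_unlock();
 *		return f;
 *	}
 */
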
extern __must_check bool rcuref_put_slowpath(rcuref_t *ref, unsigned int cnt);

/*
 * Internal helper. Do not invoke directly.
 */
static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
{
	int cnt;

	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
			 "suspicious rcuref_put_rcusafe() usage");
	/*
	 * Unconditionally decrease the reference count. The saturation and
	 * dead zones provide enough tolerance for this.
	 */
	cnt = atomic_sub_return_release(1, &ref->refcnt);
	if (likely(cnt >= 0))
		return false;

	/*
	 * Handle the last reference drop and cases inside the saturation
	 * and dead zones.
	 */
	return rcuref_put_slowpath(ref, cnt);
}

/**
 * rcuref_put_rcusafe -- Release one reference for a rcuref reference count RCU safe
 * @ref: Pointer to the reference count
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Can be invoked from contexts which guarantee that no grace period can
 * happen which would free the object concurrently if the decrement drops
 * the last reference and the slowpath races against a concurrent get() and
 * put() pair. rcu_read_lock()'ed and atomic contexts qualify.
 *
 * Return:
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely release the
 *	object which is protected by the reference counter.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	release the protected object.
 */
static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref)
{
	return __rcuref_put(ref);
}

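/*
 * Usage sketch (illustrative only, reusing the hypothetical 'struct foo'
 * and foo_get() from above): when the put already runs inside an RCU read
 * side section or another atomic context, this variant avoids the
 * preempt_disable()/preempt_enable() pair of rcuref_put().
 *
 *	rcu_read_lock();
 *	f = foo_get(key);
 *	if (f) {
 *		// ... use the object ...
 *		if (rcuref_put_rcusafe(&f->ref))
 *			kfree_rcu(f, rcu);	// last reference, free after a grace period
 *	}
 *	rcu_read_unlock();
 */
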
/**
 * rcuref_put -- Release one reference for a rcuref reference count
 * @ref: Pointer to the reference count
 *
 * Can be invoked from any context.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return:
 *
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely schedule the
 *	object, which is protected by the reference counter, for
 *	deconstruction.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	deconstruct the protected object.
 */
static inline __must_check bool rcuref_put(rcuref_t *ref)
{
	bool released;

	preempt_disable();
	released = __rcuref_put(ref);
	preempt_enable();
	return released;
}

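/*
 * Usage sketch (illustrative only, completing the hypothetical
 * 'struct foo' example): the plain put works from any context. A true
 * return value means this was the last reference and the caller may
 * schedule the object for destruction.
 *
 *	void foo_put(struct foo *f)
 *	{
 *		if (rcuref_put(&f->ref))
 *			kfree_rcu(f, rcu);	// free after a grace period
 *	}
 */
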
#endif