xref: /openbmc/linux/lib/lockref.c (revision 1802d0be)
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {  	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
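
/*
 * Illustrative note, not part of this file: the cmpxchg fast path above
 * relies on struct lockref packing the spinlock and the count into a
 * single 64-bit word (hence the BUILD_BUG_ON above). The declaration in
 * <linux/lockref.h> looks roughly like the sketch below; a single
 * cmpxchg64() on lock_count therefore bumps the count while at the same
 * time verifying that the spinlock is still unlocked.
 */
#if 0
struct lockref {
	union {
		aligned_u64 lock_count;		/* both fields as one 64-bit word */
		struct {
			spinlock_t lock;	/* taken only on the slow path */
			int count;		/* the reference count */
		};
	};
};
#endif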

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
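
/*
 * Illustrative sketch, not part of this file: a hypothetical caller that
 * already holds a reference and wants another one. "struct my_obj" and
 * my_obj_clone() are made-up names used only for this example (and reused
 * in the sketches further down).
 */
#if 0
struct my_obj {			/* hypothetical object embedding a lockref */
	struct lockref ref;
	/* ... payload ... */
};

static struct my_obj *my_obj_clone(struct my_obj *obj)
{
	/* Safe only because the caller's existing reference keeps count > 0. */
	lockref_get(&obj->ref);
	return obj;
}
#endif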

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
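
/*
 * Illustrative sketch, not part of this file: a hypothetical lookup path
 * that can race with the final put, so it only takes a reference if the
 * object is still live. Reuses the made-up struct my_obj from the sketch
 * above; my_obj_tryget() is likewise a made-up name.
 */
#if 0
static bool my_obj_tryget(struct my_obj *obj)
{
	/* Fails (returns 0) if the count is already zero or the ref is dead. */
	return lockref_get_not_zero(&obj->ref);
}
#endif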

/**
 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count would become zero
 */
int lockref_put_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 1) {
		lockref->count--;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_put_not_zero);
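
/*
 * Illustrative sketch, not part of this file: a hypothetical caller that
 * drops a reference cheaply only when it is not the last one, leaving the
 * last put to a slower teardown path. my_obj_unref() and
 * my_obj_release_slow() are made-up names.
 */
#if 0
static void my_obj_unref(struct my_obj *obj)
{
	if (lockref_put_not_zero(&obj->ref))
		return;			/* not the last reference: done */
	my_obj_release_slow(obj);	/* hypothetical slow path for the last put */
}
#endif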

/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
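
/*
 * Illustrative sketch, not part of this file: note the asymmetric return
 * convention. On success (1) nothing is held; on failure (0) the caller is
 * left holding lockref->lock and must decide what to do under it. The
 * "revive from zero" policy below is purely hypothetical, as are all names
 * other than the lockref API itself.
 */
#if 0
static bool my_obj_get_or_revive(struct my_obj *obj)
{
	if (lockref_get_or_lock(&obj->ref))
		return true;		/* got a reference on the fast path */

	/* Count was 0 or dead; we now hold obj->ref.lock. */
	if (obj->ref.count < 0) {	/* dead: refuse to resurrect */
		spin_unlock(&obj->ref.lock);
		return false;
	}
	obj->ref.count++;		/* hypothetical policy: revive from zero */
	spin_unlock(&obj->ref.lock);
	return true;
}
#endif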

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
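
/*
 * Illustrative sketch, not part of this file: lockref_put_return() only
 * ever succeeds on the lockless fast path, so a caller must be prepared to
 * fall back to the spinlock when it returns -1. Names other than the
 * lockref API are made up, and the fallback policy is hypothetical.
 */
#if 0
static int my_obj_put_fast(struct my_obj *obj)
{
	int count = lockref_put_return(&obj->ref);

	if (count >= 0)
		return count;		/* lockless decrement worked */

	/* Fast path refused (count was 0, ref dead, lock held, or no cmpxchg). */
	spin_lock(&obj->ref.lock);
	if (obj->ref.count > 0)
		count = --obj->ref.count;
	else
		count = -1;		/* hypothetical policy: report failure */
	spin_unlock(&obj->ref.lock);
	return count;
}
#endif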

/**
 * lockref_put_or_lock - Decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
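
/*
 * Illustrative sketch, not part of this file: the usual pattern around
 * lockref_put_or_lock() is "drop a reference cheaply unless it might be
 * the last one; in that case come back with the lock held and tear the
 * object down". Assumes the ref was never marked dead; my_obj_put() and
 * my_obj_free() are made-up names.
 */
#if 0
static void my_obj_put(struct my_obj *obj)
{
	if (lockref_put_or_lock(&obj->ref))
		return;			/* count was > 1, nothing else to do */

	/* Returned 0: obj->ref.lock is held and count is <= 1 (1 here). */
	if (--obj->ref.count == 0) {
		spin_unlock(&obj->ref.lock);
		my_obj_free(obj);	/* hypothetical destructor */
		return;
	}
	spin_unlock(&obj->ref.lock);
}
#endif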

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
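
/*
 * Illustrative sketch, not part of this file: marking the lockref dead is
 * done under the spinlock, typically while taking the object off whatever
 * lookup structure could still hand out new references. my_obj_kill() and
 * my_obj_unhash() are made-up names.
 */
#if 0
static void my_obj_kill(struct my_obj *obj)
{
	spin_lock(&obj->ref.lock);
	my_obj_unhash(obj);		/* hypothetical: remove from lookup table */
	lockref_mark_dead(&obj->ref);	/* later lockref_get_not_dead() calls fail */
	spin_unlock(&obj->ref.lock);
}
#endif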

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
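
/*
 * Illustrative sketch, not part of this file: a hypothetical RCU-style
 * lookup that may find an object which is being torn down concurrently.
 * Unlike lockref_get_not_zero(), lockref_get_not_dead() still succeeds
 * while the count is 0 and only refuses once the owner has called
 * lockref_mark_dead(). Names other than the lockref and RCU APIs are
 * made up.
 */
#if 0
static struct my_obj *my_obj_lookup(struct my_table *table, unsigned long key)
{
	struct my_obj *obj;

	rcu_read_lock();
	obj = my_table_find(table, key);	/* hypothetical lookup helper */
	if (obj && !lockref_get_not_dead(&obj->ref))
		obj = NULL;			/* found it, but it is already dead */
	rcu_read_unlock();
	return obj;
}
#endif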