xref: /openbmc/linux/lib/lockref.c (revision f94059f8)
// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that try_cmpxchg64_relaxed() reloads the "old" value for the
 * failure case, so each retry starts from a fresh snapshot of the
 * combined lock+count word.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old;					\
		CODE								\
		if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,		\
						 &old.lock_count,		\
						 new.lock_count))) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
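
/*
 * Rough expansion, for illustration only: the spinlock and the count share
 * a single 64-bit word (enforced by the BUILD_BUG_ON() above), so one
 * 64-bit cmpxchg can update the count while simultaneously checking that
 * the lock is not held.  With CODE = "new.count++;" and SUCCESS = "return;"
 * (the lockref_get() case below), the loop behaves roughly like:
 *
 *	old = READ_ONCE(lockref->lock_count);
 *	while (lock half of "old" looks unlocked) {
 *		new = old;
 *		new.count++;
 *		if (try_cmpxchg64_relaxed(&lockref->lock_count,
 *					  &old.lock_count, new.lock_count))
 *			return;		// lockless update succeeded
 *		if (!--retry)
 *			break;		// contended: give up after 100 tries
 *		cpu_relax();
 *	}
 *	// fall through to the spin_lock() slow path
 */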

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
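
/*
 * Usage sketch, illustrative only ("struct foo" and foo_dup() are
 * hypothetical names, not part of this file): duplicating a reference the
 * caller already holds, so the count is known to be non-zero.
 *
 *	struct foo {
 *		struct lockref ref;
 *	};
 *
 *	static void foo_dup(struct foo *foo)
 *	{
 *		lockref_get(&foo->ref);
 *	}
 */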

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero or dead
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
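
/*
 * Usage sketch, illustrative only (hypothetical names): taking a new
 * reference on an object found by a lockless lookup, which may race with
 * the final put.  A zero return means no reference was taken and the
 * object must not be used.
 *
 *	static struct foo *foo_try_get(struct foo *foo)
 *	{
 *		if (!lockref_get_not_zero(&foo->ref))
 *			return NULL;	// count already zero (or dead)
 *		return foo;
 *	}
 */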

/**
 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count would become zero
 */
int lockref_put_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 1) {
		lockref->count--;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_put_not_zero);
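
/*
 * Usage sketch, illustrative only (hypothetical names): dropping a
 * reference on the fast path only when it is certainly not the last one;
 * a zero return sends the caller to a slow path that can handle teardown.
 *
 *	static void foo_put(struct foo *foo)
 *	{
 *		if (lockref_put_not_zero(&foo->ref))
 *			return;		// not the last reference
 *		foo_put_slow(foo);	// may end up freeing the object
 *	}
 */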

/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero or dead
 * and we got the lock instead (which the caller must then release).
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
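
/*
 * Usage sketch, illustrative only (hypothetical names): on a zero return
 * the spinlock is held, so the caller can examine or revive the object
 * under the lock and is responsible for releasing it.
 *
 *	if (!lockref_get_or_lock(&foo->ref)) {
 *		// count was <= 0 and foo->ref.lock is now held
 *		foo_handle_zero_count(foo);
 *		spin_unlock(&foo->ref.lock);
 *	}
 */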

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, or the lockless update cannot be
 * performed, return -1 and leave the count unchanged.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
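
/*
 * Usage sketch, illustrative only (hypothetical names): a -1 return does
 * not mean the object is gone, only that the decrement could not be done
 * locklessly (dead, locked, too much contention, or no cmpxchg support),
 * so callers need a locked fallback.
 *
 *	int count = lockref_put_return(&foo->ref);
 *	if (count < 0)
 *		count = foo_put_slow(foo);	// decrement under foo->ref.lock
 *	if (count == 0)
 *		foo_no_more_refs(foo);
 */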

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
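
/*
 * Usage sketch, illustrative only (hypothetical names): the common "put"
 * pattern.  A return of 1 means the reference was dropped and others
 * remain; a return of 0 means this was possibly the last reference and
 * the spinlock is now held, so teardown can be decided under the lock.
 *
 *	static void foo_put(struct foo *foo)
 *	{
 *		if (lockref_put_or_lock(&foo->ref))
 *			return;
 *		// lock held, count was <= 1: for this simple object that
 *		// means ours was the last reference
 *		foo->ref.count--;
 *		spin_unlock(&foo->ref.lock);
 *		foo_free(foo);
 *	}
 */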

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
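
/*
 * Usage sketch, illustrative only (hypothetical names): marking the object
 * dead under its lock guarantees that lockref_get_not_dead() (below) fails
 * for every caller that looks at it afterwards.
 *
 *	spin_lock(&foo->ref.lock);
 *	lockref_mark_dead(&foo->ref);
 *	spin_unlock(&foo->ref.lock);
 *	// no new references can be taken via lockref_get_not_dead()
 */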

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
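
/*
 * Usage sketch, illustrative only (hypothetical names): the counterpart of
 * lockref_mark_dead() above, typically used from a lockless (e.g. RCU)
 * lookup where a dead object may still be visible for a short while.
 *
 *	rcu_read_lock();
 *	foo = foo_lookup_rcu(key);
 *	if (foo && !lockref_get_not_dead(&foo->ref))
 *		foo = NULL;		// found, but already being torn down
 *	rcu_read_unlock();
 */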