// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {  	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif

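/*
 * Roughly, CMPXCHG_LOOP(CODE, SUCCESS) gives each operation a lockless
 * fast path: it snapshots the combined spinlock+count word, runs CODE on
 * a copy while the lock looks unlocked, and tries to install the new
 * value with a 64-bit cmpxchg, running SUCCESS if the word was unchanged.
 * After 100 failed attempts, or once the lock is seen held, it falls
 * through to the caller's spinlock slow path.
 *
 * Sketch of what lockref_get()'s use of the macro amounts to (variable
 * names mirror the macro, this is not a separate API):
 *
 *	old = READ_ONCE(lockref->lock_count);
 *	while (spinlock half of "old" looks unlocked) {
 *		new = old; new.count++;
 *		if (cmpxchg64(&lockref->lock_count, old, new) == old)
 *			return;		// lockless update worked
 *		// "old" has been reloaded; retry a bounded number of times
 *	}
 *	// fall back to taking lockref->lock
 */
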
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

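/*
 * Minimal usage sketch for lockref_get(), assuming a hypothetical
 * "struct foo" that embeds a lockref (the same struct is reused by the
 * later sketches).  The caller must already hold a reference, so the
 * count is known to be non-zero:
 *
 *	struct foo {
 *		struct lockref ref;
 *	};
 *
 *	static struct foo *foo_dup(struct foo *f)
 *	{
 *		lockref_get(&f->ref);	// take an extra reference
 *		return f;
 *	}
 */
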
/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

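/*
 * Usage sketch for lockref_get_not_zero(), assuming a hypothetical
 * lookup that can race with the last reference being dropped; the
 * caller must fail (or retry) if the count was already zero or the
 * lockref was dead:
 *
 *	static struct foo *foo_tryget(struct foo *f)
 *	{
 *		if (!lockref_get_not_zero(&f->ref))
 *			return NULL;	// object is going (or has gone) away
 *		return f;
 *	}
 */
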
/**
 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count would become zero
 */
int lockref_put_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 1) {
		lockref->count--;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_put_not_zero);

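/*
 * Usage sketch for lockref_put_not_zero(): drop a reference only when it
 * is not the last one, leaving the final-put work to a slower path that
 * can take the lock (foo_release() is hypothetical):
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (lockref_put_not_zero(&f->ref))
 *			return;		// fast path: not the last reference
 *		foo_release(f);		// slow path handles the last put
 *	}
 */
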
/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);

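/*
 * Usage sketch for lockref_get_or_lock(): a zero return means no
 * reference was taken and the spinlock is now held, so the caller can
 * inspect the object (or resurrect it) before unlocking:
 *
 *	static bool foo_grab(struct foo *f)
 *	{
 *		if (lockref_get_or_lock(&f->ref))
 *			return true;		// got a reference locklessly
 *		// Count was zero or dead; f->ref.lock is held here and
 *		// the caller decides whether to resurrect or give up.
 *		spin_unlock(&f->ref.lock);
 *		return false;
 *	}
 */
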
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);

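/*
 * Usage sketch for lockref_put_return(): useful when the caller wants
 * the new count without taking the lock.  A negative return means the
 * lockless update could not be done (lock contended, count already at
 * zero, lockref dead, or no cmpxchg support) and the caller must fall
 * back to a locked put (foo_put_slow() is hypothetical):
 *
 *	static int foo_put_fast(struct foo *f)
 *	{
 *		int count = lockref_put_return(&f->ref);
 *
 *		if (count >= 0)
 *			return count;		// new count, dropped locklessly
 *		return foo_put_slow(f);		// locked fallback
 *	}
 */
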
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);

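/*
 * Usage sketch for lockref_put_or_lock(): the common "put" pattern.
 * A zero return means the count was not decremented, it was <= 1, and
 * the spinlock is now held, so the caller can drop the final reference
 * and tear the object down (foo_destroy() is hypothetical):
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		bool last;
 *
 *		if (lockref_put_or_lock(&f->ref))
 *			return;			// dropped one of many references
 *		// Count was <= 1 and f->ref.lock is held here.
 *		last = --f->ref.count == 0;
 *		spin_unlock(&f->ref.lock);
 *		if (last)
 *			foo_destroy(f);		// hypothetical teardown
 *	}
 */
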
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

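/*
 * Usage sketch for lockref_mark_dead(): called with the lock held while
 * an object is being torn down, so that later conditional gets such as
 * lockref_get_not_zero() and lockref_get_not_dead() fail:
 *
 *	static void foo_kill(struct foo *f)
 *	{
 *		spin_lock(&f->ref.lock);
 *		lockref_mark_dead(&f->ref);
 *		spin_unlock(&f->ref.lock);
 *	}
 */
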
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
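
/*
 * Usage sketch for lockref_get_not_dead(): typically used in lookups
 * where an object with a zero count may still be legitimately revived,
 * but one marked dead must be skipped:
 *
 *	static struct foo *foo_lookup_get(struct foo *f)
 *	{
 *		if (!lockref_get_not_dead(&f->ref))
 *			return NULL;	// marked dead, being torn down
 *		return f;
 *	}
 */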