#ifndef __LINUX_LOCKREF_H
#define __LINUX_LOCKREF_H

/*
 * Locked reference counts.
 *
 * These are different from just plain atomic refcounts in that they
 * are atomic with respect to the spinlock that goes with them.  In
 * particular, there can be implementations that don't actually get
 * the spinlock for the common decrement/increment operations, but they
 * still have to check that the operation is done semantically as if
 * the spinlock had been taken (using a cmpxchg operation that covers
 * both the lock and the count word, or using memory transactions, for
 * example).
 */

#include <linux/spinlock.h>
#include <generated/bounds.h>

/*
 * The lockless update path is only viable when the architecture has a
 * suitable 64-bit cmpxchg (CONFIG_ARCH_USE_CMPXCHG_LOCKREF), the kernel
 * is SMP, and the spinlock fits in 4 bytes, so that the lock and the
 * count pack into the single 64-bit word that one cmpxchg can cover.
 */
#define USE_CMPXCHG_LOCKREF \
	(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
	 IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)

struct lockref {
	union {
#if USE_CMPXCHG_LOCKREF
		/* Overlays the lock/count pair below for a single cmpxchg */
		aligned_u64 lock_count;
#endif
		struct {
			spinlock_t lock;
			int count;
		};
	};
};
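
/*
 * The lockless fast path, roughly (a simplified sketch of the retry
 * loop in lib/lockref.c, shown here for an increment; the real code
 * bounds the number of retries and falls back to taking the spinlock):
 *
 *	struct lockref old, new, prev;
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		prev = old;
 *		new = old;
 *		new.count++;
 *		old.lock_count = cmpxchg64(&lockref->lock_count,
 *					   old.lock_count,
 *					   new.lock_count);
 *		if (old.lock_count == prev.lock_count)
 *			return;
 *	}
 *
 * The cmpxchg covers the whole 64-bit word, so it can only succeed
 * while the embedded spinlock is unlocked and the count is unchanged,
 * which gives the "as if the spinlock had been taken" semantics
 * described above.
 */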
35 
36 extern void lockref_get(struct lockref *);
37 extern int lockref_put_return(struct lockref *);
38 extern int lockref_get_not_zero(struct lockref *);
39 extern int lockref_get_or_lock(struct lockref *);
40 extern int lockref_put_or_lock(struct lockref *);
41 
42 extern void lockref_mark_dead(struct lockref *);
43 extern int lockref_get_not_dead(struct lockref *);
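
/*
 * A sketch of a typical caller (a hypothetical "struct obj" embedding a
 * lockref, in the style of the dcache's d_lockref; obj_free() is
 * assumed, not part of this API):
 *
 *	static struct obj *obj_get(struct obj *o)
 *	{
 *		if (!lockref_get_not_dead(&o->ref))
 *			return NULL;
 *		return o;
 *	}
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (lockref_put_or_lock(&o->ref))
 *			return;
 *		lockref_mark_dead(&o->ref);
 *		spin_unlock(&o->ref.lock);
 *		obj_free(o);
 *	}
 *
 * obj_get() fails once the object has been marked dead; obj_put() takes
 * the fast path while the count stays above 1, and otherwise returns
 * with the lock held so the final teardown is atomic with respect to
 * concurrent lookups.
 */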

/*
 * Must be called under the spinlock for reliable results: a dead
 * lockref has a negative count (lockref_mark_dead() sets it to -128).
 */
static inline int __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
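
/*
 * For example, a teardown path that already holds the lock might do
 * (hypothetical code, following the obj pattern above):
 *
 *	spin_lock(&o->ref.lock);
 *	if (!__lockref_is_dead(&o->ref))
 *		lockref_mark_dead(&o->ref);
 *	spin_unlock(&o->ref.lock);
 */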

#endif /* __LINUX_LOCKREF_H */