/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/spinlock.h>
#include <linux/refcount.h>

struct kref {
	refcount_t refcount;
};

#define KREF_INIT(n)	{ .refcount = REFCOUNT_INIT(n), }

/**
 * kref_init - initialize object.
 * @kref: object in question.
 */
static inline void kref_init(struct kref *kref)
{
	refcount_set(&kref->refcount, 1);
}
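
/*
 * Usage sketch (illustrative only, not part of this header): a refcounted
 * object embeds a struct kref and initializes it either statically with
 * KREF_INIT() or at runtime with kref_init().  The names my_obj and
 * my_obj_alloc below are hypothetical, and slab allocation via kzalloc()
 * is only an example.
 *
 *	struct my_obj {
 *		struct kref refcount;
 *		int payload;
 *	};
 *
 *	static struct my_obj static_obj = { .refcount = KREF_INIT(1) };
 *
 *	static struct my_obj *my_obj_alloc(void)
 *	{
 *		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *		if (obj)
 *			kref_init(&obj->refcount);
 *		return obj;
 *	}
 *
 * After kref_init() the object holds exactly one reference, owned by the
 * caller that created it.
 */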

/*
 * kref_read - read the current reference count.
 *
 * Returns a snapshot of the reference count.  The value can change at any
 * time once other references exist, so this is typically only useful for
 * debugging and diagnostics, not for making lifetime decisions.
 */
static inline unsigned int kref_read(const struct kref *kref)
{
	return refcount_read(&kref->refcount);
}

/**
 * kref_get - increment refcount for object.
 * @kref: object.
 *
 * The caller must already hold a valid reference to @kref.
 */
static inline void kref_get(struct kref *kref)
{
	refcount_inc(&kref->refcount);
}

/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 *	     This pointer is required; passing kfree() directly as the
 *	     release function is not acceptable.
 *
 * Decrement the refcount, and if it reaches 0, call release().
 * Return 1 if the object was released, otherwise return 0.  Beware: even if
 * this function returns 0 you cannot count on the kref still being in
 * memory, since the remaining references may be dropped by other holders at
 * any time.  Use the return value only to learn that the object is gone,
 * never to conclude that it is still present.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	if (refcount_dec_and_test(&kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}
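
/*
 * Usage sketch (illustrative only): the release callback normally uses
 * container_of() to recover the enclosing object and then frees it.  The
 * names my_obj, my_obj_release, my_obj_get and my_obj_put are hypothetical.
 *
 *	static void my_obj_release(struct kref *kref)
 *	{
 *		struct my_obj *obj = container_of(kref, struct my_obj, refcount);
 *
 *		kfree(obj);
 *	}
 *
 *	void my_obj_get(struct my_obj *obj)
 *	{
 *		kref_get(&obj->refcount);
 *	}
 *
 *	void my_obj_put(struct my_obj *obj)
 *	{
 *		kref_put(&obj->refcount, my_obj_release);
 *	}
 */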

/**
 * kref_put_mutex - decrement refcount for object, taking a mutex on final put.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 * @lock: mutex guarding removal of the object from its lookup structure.
 *
 * Decrement the refcount, and if it reaches 0, acquire @lock and call
 * release() with @lock held.  kref_put_mutex() does not drop @lock itself;
 * by convention the release callback unlocks it after unlinking the object.
 * Return 1 if the object was released, otherwise return 0.
 */
static inline int kref_put_mutex(struct kref *kref,
				 void (*release)(struct kref *kref),
				 struct mutex *lock)
{
	if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
		release(kref);
		return 1;
	}
	return 0;
}

/**
 * kref_put_lock - decrement refcount for object, taking a spinlock on final put.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *	     last reference to the object is released.
 * @lock: spinlock guarding removal of the object from its lookup structure.
 *
 * Decrement the refcount, and if it reaches 0, acquire @lock and call
 * release() with @lock held.  As with kref_put_mutex(), the lock is not
 * dropped here; the release callback is expected to unlock it.
 * Return 1 if the object was released, otherwise return 0.
 */
static inline int kref_put_lock(struct kref *kref,
				void (*release)(struct kref *kref),
				spinlock_t *lock)
{
	if (refcount_dec_and_lock(&kref->refcount, lock)) {
		release(kref);
		return 1;
	}
	return 0;
}
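
/*
 * Usage sketch (illustrative only): when the object sits in a lookup
 * structure protected by a lock, the final put can take that lock, let the
 * release callback unlink the object and drop the lock, and only then free
 * it.  The names my_obj, my_obj_lock and my_obj_put_locked are hypothetical;
 * my_obj is assumed to embed a struct list_head node, and my_obj_lock is
 * assumed to be a spinlock_t guarding the list.
 *
 *	static void my_obj_release_locked(struct kref *kref)
 *	{
 *		struct my_obj *obj = container_of(kref, struct my_obj, refcount);
 *
 *		list_del(&obj->node);
 *		spin_unlock(&my_obj_lock);
 *		kfree(obj);
 *	}
 *
 *	void my_obj_put_locked(struct my_obj *obj)
 *	{
 *		kref_put_lock(&obj->refcount, my_obj_release_locked, &my_obj_lock);
 *	}
 */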

/**
 * kref_get_unless_zero - Increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded, otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + removal from the
 * lookup structure.  Furthermore, RCU implementations become extremely
 * tricky.  With a lookup followed by a kref_get_unless_zero *with return
 * value check*, locking in the kref_put path can be deferred to the actual
 * removal from the lookup structure, and RCU lookups become trivial.  A
 * usage sketch follows the function below.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	return refcount_inc_not_zero(&kref->refcount);
}
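
/*
 * Usage sketch (illustrative only): a lookup under a lock combined with
 * kref_get_unless_zero(), whose return value must be checked.  The names
 * my_obj, my_obj_list and my_obj_lock are hypothetical; my_obj_list is
 * assumed to be a list_head holding the objects and my_obj_lock a
 * spinlock_t guarding it.
 *
 *	struct my_obj *my_obj_lookup(void)
 *	{
 *		struct my_obj *obj = NULL;
 *
 *		spin_lock(&my_obj_lock);
 *		if (!list_empty(&my_obj_list)) {
 *			obj = list_first_entry(&my_obj_list, struct my_obj, node);
 *			if (!kref_get_unless_zero(&obj->refcount))
 *				obj = NULL;
 *		}
 *		spin_unlock(&my_obj_lock);
 *		return obj;
 *	}
 *
 * The put path then removes the object from my_obj_list in the release
 * callback (for example via kref_put_lock() as sketched above), so a
 * lookup either returns a live object or fails the kref_get_unless_zero()
 * check.
 */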
#endif /* _KREF_H_ */