/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_BL_H
#define _LINUX_RCULIST_BL_H

/*
 * RCU-protected bl list version. See include/linux/list_bl.h.
 */
#include <linux/list_bl.h>
#include <linux/rcupdate.h>

static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	rcu_assign_pointer(h->first,
		(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
}

static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK);
}

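/*
 * Purely illustrative example: a hypothetical "struct foo" hashed into a
 * bit-locked chain, and a reader peeking at the first entry.
 * hlist_bl_first_rcu() strips LIST_BL_LOCKMASK from the head pointer, and
 * the result may only be used under rcu_read_lock().  The struct foo type,
 * the head pointer and the foo_* helpers here and below are hypothetical.
 *
 *	struct foo {
 *		int key;
 *		struct hlist_bl_node node;
 *		struct rcu_head rcu;
 *	};
 *
 *	static bool foo_first_matches(struct hlist_bl_head *head, int key)
 *	{
 *		struct hlist_bl_node *first;
 *		bool ret = false;
 *
 *		rcu_read_lock();
 *		first = hlist_bl_first_rcu(head);
 *		if (first)
 *			ret = hlist_bl_entry(first, struct foo, node)->key == key;
 *		rcu_read_unlock();
 *		return ret;
 *	}
 *
 * The later examples below reuse this hypothetical struct foo.
 */
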
/**
 * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on the node returns true after this. It is
 * useful for RCU-based lock-free read-side traversal when the writer side
 * must know whether the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we cannot poison the forward pointers
 * that may still be used for walking the hash list; we can only
 * zero the pprev pointer so that hlist_bl_unhashed() will return true
 * after this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_bl_add_head_rcu() or
 * hlist_bl_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_bl_for_each_entry_rcu().
 */
static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
{
	if (!hlist_bl_unhashed(n)) {
		__hlist_bl_del(n);
		n->pprev = NULL;
	}
}

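/*
 * Purely illustrative example: a writer removing the hypothetical
 * "struct foo" above from its chain under the chain's bit lock.  Afterwards
 * hlist_bl_unhashed(&f->node) returns true, but concurrent readers may
 * still be walking over the node, so f must not be freed before a grace
 * period (e.g. via synchronize_rcu() or kfree_rcu()).
 *
 *	static void foo_remove(struct hlist_bl_head *head, struct foo *f)
 *	{
 *		hlist_bl_lock(head);
 *		hlist_bl_del_init_rcu(&f->node);
 *		hlist_bl_unlock(head);
 *	}
 */
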
/**
 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on the entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lock-free traversal.
 *
 * In particular, it means that we cannot poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu().
 */
static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	n->pprev = LIST_POISON2;
}

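/*
 * Purely illustrative example: deleting the hypothetical "struct foo"
 * above and deferring the free until after a grace period, so lock-free
 * readers never touch freed memory.
 *
 *	static void foo_erase(struct hlist_bl_head *head, struct foo *f)
 *	{
 *		hlist_bl_lock(head);
 *		hlist_bl_del_rcu(&f->node);
 *		hlist_bl_unlock(head);
 *		kfree_rcu(f, rcu);
 *	}
 */
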
/**
 * hlist_bl_add_head_rcu - adds an element to the head of the hash list
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_bl,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first;

	/* don't need hlist_bl_first_rcu because we're under lock */
	first = hlist_bl_first(h);

	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;

	/* need _rcu because we can have concurrent lock free readers */
	hlist_bl_set_first_rcu(h, n);
}
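
/*
 * Purely illustrative example: inserting the hypothetical "struct foo"
 * above.  Insertion is a writer-side operation, so it runs under the
 * chain's bit lock while readers may traverse the chain concurrently
 * under rcu_read_lock().
 *
 *	static void foo_insert(struct hlist_bl_head *head, struct foo *item)
 *	{
 *		hlist_bl_lock(head);
 *		hlist_bl_add_head_rcu(&item->node, head);
 *		hlist_bl_unlock(head);
 *	}
 */
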
/**
 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 */
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)		\
	for (pos = hlist_bl_first_rcu(head);				\
		pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(pos->next))
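
/*
 * Purely illustrative example: a lock-free lookup of the hypothetical
 * "struct foo" above, walking one hash chain entirely under
 * rcu_read_lock().
 *
 *	static bool foo_contains(struct hlist_bl_head *head, int key)
 *	{
 *		struct hlist_bl_node *pos;
 *		struct foo *f;
 *		bool found = false;
 *
 *		rcu_read_lock();
 *		hlist_bl_for_each_entry_rcu(f, pos, head, node) {
 *			if (f->key == key) {
 *				found = true;
 *				break;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return found;
 *	}
 */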

#endif