/*
 * RCU segmented callback lists, internal-to-rcu header file
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/rcu_segcblist.h>

/*
 * Account for the fact that a previously dequeued callback turned out
 * to be marked as lazy.
 */
static inline void rcu_cblist_dequeued_lazy(struct rcu_cblist *rclp)
{
	rclp->len_lazy--;
}

void rcu_cblist_init(struct rcu_cblist *rclp);
struct rcu_head *rcu_cblist_dequeue(struct rcu_cblist *rclp);

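/*
 * Illustrative sketch only (not part of this header): a typical caller
 * drains an rcu_cblist with rcu_cblist_dequeue() and, if an invoked
 * callback turns out to have been a lazy (kfree-style) one, fixes up
 * the lazy count with rcu_cblist_dequeued_lazy().  Roughly:
 *
 *	struct rcu_head *rhp;
 *
 *	while ((rhp = rcu_cblist_dequeue(&rcl)) != NULL) {
 *		if (invoke_cb_and_check_lazy(rhp))  // hypothetical helper
 *			rcu_cblist_dequeued_lazy(&rcl);
 *	}
 */
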
/*
 * Is the specified rcu_segcblist structure empty?
 *
 * But careful!  The fact that the ->head field is NULL does not
 * necessarily imply that there are no callbacks associated with
 * this structure.  When callbacks are being invoked, they are
 * removed as a group.  If callback invocation must be preempted,
 * the remaining callbacks will be added back to the list.  Either
 * way, the counts are updated later.
 *
 * So it is often the case that rcu_segcblist_n_cbs() should be used
 * instead.
 */
static inline bool rcu_segcblist_empty(struct rcu_segcblist *rsclp)
{
	return !rsclp->head;
}

/* Return number of callbacks in segmented callback list. */
static inline long rcu_segcblist_n_cbs(struct rcu_segcblist *rsclp)
{
	return READ_ONCE(rsclp->len);
}

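/*
 * Illustrative sketch only (not part of this header): because ->head can
 * be transiently NULL while a batch of callbacks is being invoked, code
 * that wants to know whether any callbacks remain outstanding should
 * test the count rather than the head pointer:
 *
 *	// Prefer the count; rcu_segcblist_empty() can return true even
 *	// though callbacks are still being invoked.
 *	if (rcu_segcblist_n_cbs(rsclp))
 *		process_outstanding_cbs(rsclp);	// hypothetical caller
 */
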
/* Return number of lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_lazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len_lazy;
}

/* Return number of non-lazy callbacks in segmented callback list. */
static inline long rcu_segcblist_n_nonlazy_cbs(struct rcu_segcblist *rsclp)
{
	return rsclp->len - rsclp->len_lazy;
}

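/*
 * Illustrative note (not part of this header): the lazy and non-lazy
 * counts partition the total, so, ignoring concurrent updates:
 *
 *	rcu_segcblist_n_cbs(rsclp) ==
 *		rcu_segcblist_n_lazy_cbs(rsclp) +
 *		rcu_segcblist_n_nonlazy_cbs(rsclp)
 */
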
/*
 * Is the specified rcu_segcblist enabled, for example, not corresponding
 * to an offline or callback-offloaded CPU?
 */
static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
{
	return !!rsclp->tails[RCU_NEXT_TAIL];
}

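/*
 * Illustrative sketch only (not part of this header): callers typically
 * check that a list is enabled before queuing onto it, falling back to
 * some other strategy otherwise:
 *
 *	if (rcu_segcblist_is_enabled(rsclp))
 *		rcu_segcblist_enqueue(rsclp, rhp, lazy);
 *	else
 *		handle_disabled_list(rhp);	// hypothetical fallback
 */
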
/*
 * Are all segments following the specified segment of the specified
 * rcu_segcblist structure empty of callbacks?  (The specified
 * segment might well contain callbacks.)
 */
static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
{
	return !*rsclp->tails[seg];
}

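/*
 * Illustrative sketch only (not part of this header): using the segment
 * indices from <linux/rcu_segcblist.h>, a caller can ask whether every
 * callback on the list has already had its grace period, that is,
 * whether nothing remains in the RCU_WAIT_TAIL, RCU_NEXT_READY_TAIL,
 * or RCU_NEXT_TAIL segments:
 *
 *	if (rcu_segcblist_restempty(rsclp, RCU_DONE_TAIL))
 *		all_queued_cbs_are_ready();	// hypothetical caller
 */
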
/*
 * Interim function to return rcu_segcblist head pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head *rcu_segcblist_head(struct rcu_segcblist *rsclp)
{
	return rsclp->head;
}

/*
 * Interim function to return rcu_segcblist tail pointer.  Longer term, the
 * rcu_segcblist will be used more pervasively, removing the need for this
 * function.
 */
static inline struct rcu_head **rcu_segcblist_tail(struct rcu_segcblist *rsclp)
{
	WARN_ON_ONCE(rcu_segcblist_empty(rsclp));
	return rsclp->tails[RCU_NEXT_TAIL];
}

void rcu_segcblist_init(struct rcu_segcblist *rsclp);
void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
struct rcu_head *rcu_segcblist_first_pend_cb(struct rcu_segcblist *rsclp);
void rcu_segcblist_enqueue(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
bool rcu_segcblist_entrain(struct rcu_segcblist *rsclp,
			   struct rcu_head *rhp, bool lazy);
void rcu_segcblist_extract_count(struct rcu_segcblist *rsclp,
				 struct rcu_cblist *rclp);
void rcu_segcblist_extract_done_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_extract_pend_cbs(struct rcu_segcblist *rsclp,
				    struct rcu_cblist *rclp);
void rcu_segcblist_insert_count(struct rcu_segcblist *rsclp,
				struct rcu_cblist *rclp);
void rcu_segcblist_insert_done_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_insert_pend_cbs(struct rcu_segcblist *rsclp,
				   struct rcu_cblist *rclp);
void rcu_segcblist_advance(struct rcu_segcblist *rsclp, unsigned long seq);
bool rcu_segcblist_accelerate(struct rcu_segcblist *rsclp, unsigned long seq);
void rcu_segcblist_merge(struct rcu_segcblist *dst_rsclp,
			 struct rcu_segcblist *src_rsclp);
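
/*
 * Illustrative sketch only (not part of this header): a rough outline of
 * how these functions cooperate over a callback's lifetime, with rsclp,
 * rhp, rcl, and gp_seq assumed to be supplied by the caller:
 *
 *	rcu_segcblist_enqueue(rsclp, rhp, false);	// queue new callback
 *	rcu_segcblist_accelerate(rsclp, gp_seq);	// tag it with a GP number
 *	...						// grace period elapses
 *	rcu_segcblist_advance(rsclp, gp_seq);		// move ready CBs to "done"
 *	rcu_cblist_init(&rcl);
 *	rcu_segcblist_extract_count(rsclp, &rcl);
 *	rcu_segcblist_extract_done_cbs(rsclp, &rcl);	// pull out ready CBs
 *	...						// invoke CBs from rcl
 *	rcu_segcblist_insert_done_cbs(rsclp, &rcl);	// return any leftovers
 *	rcu_segcblist_insert_count(rsclp, &rcl);	// and the updated count
 *
 * The exact ordering and locking are the business of callers such as the
 * RCU core code; see kernel/rcu/tree.c and kernel/rcu/rcu_segcblist.c.
 */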