/* xref: /openbmc/linux/include/linux/rcutiny.h (revision 43a89bae) */
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
129b1d82faSPaul E. McKenney #ifndef __LINUX_TINY_H
139b1d82faSPaul E. McKenney #define __LINUX_TINY_H
149b1d82faSPaul E. McKenney 
1524691069SChristoph Hellwig #include <asm/param.h> /* for HZ */
169b1d82faSPaul E. McKenney 
1791a967fdSPaul E. McKenney struct rcu_gp_oldstate {
1891a967fdSPaul E. McKenney 	unsigned long rgos_norm;
1991a967fdSPaul E. McKenney };
2091a967fdSPaul E. McKenney 
2118538248SPaul E. McKenney // Maximum number of rcu_gp_oldstate values corresponding to
2218538248SPaul E. McKenney // not-yet-completed RCU grace periods.
2318538248SPaul E. McKenney #define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2
2418538248SPaul E. McKenney 
2518538248SPaul E. McKenney /*
2618538248SPaul E. McKenney  * Are the two oldstate values the same?  See the Tree RCU version for
2718538248SPaul E. McKenney  * docbook header.
2818538248SPaul E. McKenney  */
same_state_synchronize_rcu_full(struct rcu_gp_oldstate * rgosp1,struct rcu_gp_oldstate * rgosp2)2918538248SPaul E. McKenney static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
3018538248SPaul E. McKenney 						   struct rcu_gp_oldstate *rgosp2)
3118538248SPaul E. McKenney {
3218538248SPaul E. McKenney 	return rgosp1->rgos_norm == rgosp2->rgos_norm;
3318538248SPaul E. McKenney }
3418538248SPaul E. McKenney 
350909fc2bSPaul E. McKenney unsigned long get_state_synchronize_rcu(void);
363fdefca9SPaul E. McKenney 
get_state_synchronize_rcu_full(struct rcu_gp_oldstate * rgosp)373fdefca9SPaul E. McKenney static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
383fdefca9SPaul E. McKenney {
393fdefca9SPaul E. McKenney 	rgosp->rgos_norm = get_state_synchronize_rcu();
403fdefca9SPaul E. McKenney }
413fdefca9SPaul E. McKenney 
420909fc2bSPaul E. McKenney unsigned long start_poll_synchronize_rcu(void);
4376ea3641SPaul E. McKenney 
start_poll_synchronize_rcu_full(struct rcu_gp_oldstate * rgosp)4476ea3641SPaul E. McKenney static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
4576ea3641SPaul E. McKenney {
4676ea3641SPaul E. McKenney 	rgosp->rgos_norm = start_poll_synchronize_rcu();
4776ea3641SPaul E. McKenney }
4876ea3641SPaul E. McKenney 
490909fc2bSPaul E. McKenney bool poll_state_synchronize_rcu(unsigned long oldstate);
50765a3f4fSPaul E. McKenney 
poll_state_synchronize_rcu_full(struct rcu_gp_oldstate * rgosp)5191a967fdSPaul E. McKenney static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
5291a967fdSPaul E. McKenney {
5391a967fdSPaul E. McKenney 	return poll_state_synchronize_rcu(rgosp->rgos_norm);
5491a967fdSPaul E. McKenney }
5591a967fdSPaul E. McKenney 
cond_synchronize_rcu(unsigned long oldstate)56765a3f4fSPaul E. McKenney static inline void cond_synchronize_rcu(unsigned long oldstate)
57765a3f4fSPaul E. McKenney {
58765a3f4fSPaul E. McKenney 	might_sleep();
59765a3f4fSPaul E. McKenney }
60765a3f4fSPaul E. McKenney 
cond_synchronize_rcu_full(struct rcu_gp_oldstate * rgosp)61b6fe4917SPaul E. McKenney static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
62b6fe4917SPaul E. McKenney {
63b6fe4917SPaul E. McKenney 	cond_synchronize_rcu(rgosp->rgos_norm);
64b6fe4917SPaul E. McKenney }
65b6fe4917SPaul E. McKenney 
start_poll_synchronize_rcu_expedited(void)66d96c52feSPaul E. McKenney static inline unsigned long start_poll_synchronize_rcu_expedited(void)
67d96c52feSPaul E. McKenney {
68d96c52feSPaul E. McKenney 	return start_poll_synchronize_rcu();
69d96c52feSPaul E. McKenney }
70d96c52feSPaul E. McKenney 
start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate * rgosp)716c502b14SPaul E. McKenney static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
726c502b14SPaul E. McKenney {
736c502b14SPaul E. McKenney 	rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
746c502b14SPaul E. McKenney }
756c502b14SPaul E. McKenney 
cond_synchronize_rcu_expedited(unsigned long oldstate)76d96c52feSPaul E. McKenney static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
77d96c52feSPaul E. McKenney {
78d96c52feSPaul E. McKenney 	cond_synchronize_rcu(oldstate);
79d96c52feSPaul E. McKenney }
80d96c52feSPaul E. McKenney 
cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate * rgosp)818df13f01SPaul E. McKenney static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
828df13f01SPaul E. McKenney {
838df13f01SPaul E. McKenney 	cond_synchronize_rcu_expedited(rgosp->rgos_norm);
848df13f01SPaul E. McKenney }
858df13f01SPaul E. McKenney 
86709fdce7SPaul E. McKenney extern void rcu_barrier(void);
872c42818eSPaul E. McKenney 
synchronize_rcu_expedited(void)88a57eb940SPaul E. McKenney static inline void synchronize_rcu_expedited(void)
89da848c47SPaul E. McKenney {
90a8bb74acSPaul E. McKenney 	synchronize_rcu();
91da848c47SPaul E. McKenney }
926ebb237bSPaul E. McKenney 
933042f83fSUladzislau Rezki (Sony) /*
943042f83fSUladzislau Rezki (Sony)  * Add one more declaration of kvfree() here. It is
953042f83fSUladzislau Rezki (Sony)  * not so straight forward to just include <linux/mm.h>
963042f83fSUladzislau Rezki (Sony)  * where it is defined due to getting many compile
973042f83fSUladzislau Rezki (Sony)  * errors caused by that include.
983042f83fSUladzislau Rezki (Sony)  */
993042f83fSUladzislau Rezki (Sony) extern void kvfree(const void *addr);
1003042f83fSUladzislau Rezki (Sony) 
__kvfree_call_rcu(struct rcu_head * head,void * ptr)10104a522b7SUladzislau Rezki (Sony) static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
102486e2593SPaul E. McKenney {
1033042f83fSUladzislau Rezki (Sony) 	if (head) {
10404a522b7SUladzislau Rezki (Sony) 		call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
1053042f83fSUladzislau Rezki (Sony) 		return;
1063042f83fSUladzislau Rezki (Sony) 	}
1073042f83fSUladzislau Rezki (Sony) 
1083042f83fSUladzislau Rezki (Sony) 	// kvfree_rcu(one_arg) call.
1093042f83fSUladzislau Rezki (Sony) 	might_sleep();
1103042f83fSUladzislau Rezki (Sony) 	synchronize_rcu();
11104a522b7SUladzislau Rezki (Sony) 	kvfree(ptr);
112486e2593SPaul E. McKenney }
113486e2593SPaul E. McKenney 
114800d6acfSJohannes Berg #ifdef CONFIG_KASAN_GENERIC
11504a522b7SUladzislau Rezki (Sony) void kvfree_call_rcu(struct rcu_head *head, void *ptr);
116800d6acfSJohannes Berg #else
kvfree_call_rcu(struct rcu_head * head,void * ptr)11704a522b7SUladzislau Rezki (Sony) static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
118800d6acfSJohannes Berg {
11904a522b7SUladzislau Rezki (Sony) 	__kvfree_call_rcu(head, ptr);
120800d6acfSJohannes Berg }
121800d6acfSJohannes Berg #endif
122800d6acfSJohannes Berg 
123709fdce7SPaul E. McKenney void rcu_qs(void);
12445975c7dSPaul E. McKenney 
rcu_softirq_qs(void)125d28139c4SPaul E. McKenney static inline void rcu_softirq_qs(void)
126d28139c4SPaul E. McKenney {
127709fdce7SPaul E. McKenney 	rcu_qs();
128d28139c4SPaul E. McKenney }
129d28139c4SPaul E. McKenney 
130bcbfdd01SPaul E. McKenney #define rcu_note_context_switch(preempt) \
131bcbfdd01SPaul E. McKenney 	do { \
132709fdce7SPaul E. McKenney 		rcu_qs(); \
13343766c3eSPaul E. McKenney 		rcu_tasks_qs(current, (preempt)); \
134bcbfdd01SPaul E. McKenney 	} while (0)
135a57eb940SPaul E. McKenney 
rcu_needs_cpu(void)13629845399SFrederic Weisbecker static inline int rcu_needs_cpu(void)
1375f192ab0SPaul E. McKenney {
1385f192ab0SPaul E. McKenney 	return 0;
1395f192ab0SPaul E. McKenney }
1405f192ab0SPaul E. McKenney 
rcu_request_urgent_qs_task(struct task_struct * t)141*43a89baeSPaul E. McKenney static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
142*43a89baeSPaul E. McKenney 
143a57eb940SPaul E. McKenney /*
14429ce8310SGleb Natapov  * Take advantage of the fact that there is only one CPU, which
14529ce8310SGleb Natapov  * allows us to ignore virtualization-based context switches.
14629ce8310SGleb Natapov  */
rcu_virt_note_context_switch(void)147b5ad0d2eSZeng Heng static inline void rcu_virt_note_context_switch(void) { }
rcu_cpu_stall_reset(void)14871c40fd0SPaul E. McKenney static inline void rcu_cpu_stall_reset(void) { }
rcu_jiffies_till_stall_check(void)1491b27291bSPaul E. McKenney static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
rcu_irq_exit_check_preempt(void)15007325d4aSThomas Gleixner static inline void rcu_irq_exit_check_preempt(void) { }
exit_rcu(void)15171c40fd0SPaul E. McKenney static inline void exit_rcu(void) { }
rcu_preempt_need_deferred_qs(struct task_struct * t)1523e310098SPaul E. McKenney static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
1533e310098SPaul E. McKenney {
1543e310098SPaul E. McKenney 	return false;
1553e310098SPaul E. McKenney }
rcu_preempt_deferred_qs(struct task_struct * t)1563e310098SPaul E. McKenney static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
157584dc4ceSTeodora Baluta void rcu_scheduler_starting(void);
rcu_end_inkernel_boot(void)158d2b1654fSPaul E. McKenney static inline void rcu_end_inkernel_boot(void) { }
rcu_inkernel_boot_has_ended(void)15959ee0326SPaul E. McKenney static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
rcu_is_watching(void)16071c40fd0SPaul E. McKenney static inline bool rcu_is_watching(void) { return true; }
rcu_momentary_dyntick_idle(void)16179ba7ff5SPaul E. McKenney static inline void rcu_momentary_dyntick_idle(void) { }
kfree_rcu_scheduler_running(void)162a35d1690SByungchul Park static inline void kfree_rcu_scheduler_running(void) { }
rcu_gp_might_be_stalled(void)1636be7436dSPaul E. McKenney static inline bool rcu_gp_might_be_stalled(void) { return false; }
1645c173eb8SPaul E. McKenney 
16571c40fd0SPaul E. McKenney /* Avoid RCU read-side critical sections leaking across. */
rcu_all_qs(void)16671c40fd0SPaul E. McKenney static inline void rcu_all_qs(void) { barrier(); }
1675cd37193SPaul E. McKenney 
1684df83742SThomas Gleixner /* RCUtree hotplug events */
1694df83742SThomas Gleixner #define rcutree_prepare_cpu      NULL
1704df83742SThomas Gleixner #define rcutree_online_cpu       NULL
1714df83742SThomas Gleixner #define rcutree_offline_cpu      NULL
1724df83742SThomas Gleixner #define rcutree_dead_cpu         NULL
1734df83742SThomas Gleixner #define rcutree_dying_cpu        NULL
rcu_cpu_starting(unsigned int cpu)174f64c6013SPeter Zijlstra static inline void rcu_cpu_starting(unsigned int cpu) { }
1754df83742SThomas Gleixner 
1769b1d82faSPaul E. McKenney #endif /* __LINUX_RCUTINY_H */
177