18c366db0SPaul E. McKenney /* SPDX-License-Identifier: GPL-2.0+ */
2d8be8173SPaul E. McKenney /*
3d8be8173SPaul E. McKenney * Sleepable Read-Copy Update mechanism for mutual exclusion,
4d8be8173SPaul E. McKenney * tiny variant.
5d8be8173SPaul E. McKenney *
6d8be8173SPaul E. McKenney * Copyright (C) IBM Corporation, 2017
7d8be8173SPaul E. McKenney *
88c366db0SPaul E. McKenney * Author: Paul McKenney <paulmck@linux.ibm.com>
9d8be8173SPaul E. McKenney */
10d8be8173SPaul E. McKenney
11d8be8173SPaul E. McKenney #ifndef _LINUX_SRCU_TINY_H
12d8be8173SPaul E. McKenney #define _LINUX_SRCU_TINY_H
13d8be8173SPaul E. McKenney
14d8be8173SPaul E. McKenney #include <linux/swait.h>
15d8be8173SPaul E. McKenney
/*
 * Per-instance state for Tiny SRCU.  Unlike Tree SRCU, all state lives
 * in this one structure — there is no per-CPU data (see the comment
 * above DEFINE_SRCU() below).
 */
struct srcu_struct {
	short srcu_lock_nesting[2];	/* srcu_read_lock() nesting depth. */
	u8 srcu_gp_running;		/* GP workqueue running? */
	u8 srcu_gp_waiting;		/* GP waiting for readers? */
	unsigned long srcu_idx;		/* Current reader array element in bit 0x2. */
	unsigned long srcu_idx_max;	/* Furthest future srcu_idx request. */
	struct swait_queue_head srcu_wq;
					/* Last srcu_read_unlock() wakes GP. */
	struct rcu_head *srcu_cb_head;	/* Pending callbacks: Head. */
	struct rcu_head **srcu_cb_tail;	/* Pending callbacks: Tail. */
	struct work_struct srcu_work;	/* For driving grace periods. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};
31d8be8173SPaul E. McKenney
32d8be8173SPaul E. McKenney void srcu_drive_gp(struct work_struct *wp);
33d8be8173SPaul E. McKenney
/*
 * Static initializer for a Tiny SRCU srcu_struct.  The second and third
 * arguments are ignored; they exist only for API compatibility with the
 * Tree SRCU form of this macro.  Pointing ->srcu_cb_tail at
 * ->srcu_cb_head yields an empty callback list.
 */
#define __SRCU_STRUCT_INIT(name, __ignored, ___ignored)			\
{									\
	.srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq),	\
	.srcu_cb_tail = &name.srcu_cb_head,				\
	.srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp),	\
	__SRCU_DEP_MAP_INIT(name)					\
}
41d8be8173SPaul E. McKenney
42d8be8173SPaul E. McKenney /*
43d8be8173SPaul E. McKenney * This odd _STATIC_ arrangement is needed for API compatibility with
44d8be8173SPaul E. McKenney * Tree SRCU, which needs some per-CPU data.
45d8be8173SPaul E. McKenney */
/* Define and initialize an srcu_struct with external linkage. */
#define DEFINE_SRCU(name) \
	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
/* As above, but with static (file-local) linkage. */
#define DEFINE_STATIC_SRCU(name) \
	static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name, name)
50d8be8173SPaul E. McKenney
// Dummy structure for srcu_notifier_head.  Tiny SRCU needs no such
// state, but the type and its initializer must exist so that code
// shared with Tree SRCU compiles unchanged.
struct srcu_usage { };
#define __SRCU_USAGE_INIT(name) { }
54*ed2b9e1bSPaul E. McKenney
55aacb5d91SPaul E. McKenney void synchronize_srcu(struct srcu_struct *ssp);
56d8be8173SPaul E. McKenney
/*
 * Counts the new reader in the appropriate element of the srcu_struct.
 * Can be invoked from irq/bh handlers, but the matching
 * __srcu_read_unlock() must be in the same handler instance.  Returns an
 * index that must be passed to the matching srcu_read_unlock().
 */
static inline int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	/*
	 * ->srcu_idx carries the current reader array element in bit 0x2.
	 * NOTE(review): the +1 appears to steer readers to the next element
	 * while a grace-period phase is in flight — confirm against
	 * srcu_drive_gp() before relying on this.
	 */
	idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1;
	/*
	 * Marked read-modify-write: the counter is read concurrently by the
	 * grace-period machinery, so both accesses are annotated rather
	 * than using a plain ++.
	 */
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1);
	return idx;
}
71d4efe6c5SPaul E. McKenney
/*
 * Expedited grace-period wait.  Tiny SRCU has no separate expedited
 * path, so this maps directly onto synchronize_srcu().
 */
static inline void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
76d8be8173SPaul E. McKenney
/*
 * Wait for pending SRCU callbacks.  Tiny SRCU implements this as a
 * full grace-period wait via synchronize_srcu().  NOTE(review): this
 * relies on callback invocation ordering in srcu_drive_gp() — confirm
 * there if modifying.
 */
static inline void srcu_barrier(struct srcu_struct *ssp)
{
	synchronize_srcu(ssp);
}
81d8be8173SPaul E. McKenney
/* Defined here to avoid size increase for non-torture kernels. */
static inline void srcu_torture_stats_print(struct srcu_struct *ssp,
					    char *tt, char *tf)
{
	int idx;

	/*
	 * Diagnostic-only snapshot of a moving target: data_race() marks
	 * these lockless reads as intentionally racy, so inconsistent
	 * values across the fields below are tolerated by design.
	 */
	idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1;
	pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd) gp: %lu->%lu\n",
		 tt, tf, idx,
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])),
		 data_race(READ_ONCE(ssp->srcu_lock_nesting[idx])),
		 data_race(READ_ONCE(ssp->srcu_idx)),
		 data_race(READ_ONCE(ssp->srcu_idx_max)));
}
96115a1a52SPaul E. McKenney
97d8be8173SPaul E. McKenney #endif
98