/* xref: /openbmc/linux/kernel/locking/percpu-rwsem.c (revision bc5aa3a0) */
#include <linux/atomic.h>
#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/wait.h>
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/errno.h>

int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
			const char *name, struct lock_class_key *rwsem_key)
{
	brw->fast_read_ctr = alloc_percpu(int);
	if (unlikely(!brw->fast_read_ctr))
		return -ENOMEM;

	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
	__init_rwsem(&brw->rw_sem, name, rwsem_key);
	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
	atomic_set(&brw->slow_read_ctr, 0);
	init_waitqueue_head(&brw->write_waitq);
	return 0;
}
EXPORT_SYMBOL_GPL(__percpu_init_rwsem);

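/*
 * A minimal usage sketch, assuming a hypothetical "my_sem"; callers
 * normally go through the percpu_init_rwsem() wrapper in
 * <linux/percpu-rwsem.h>, which supplies the lock_class_key:
 *
 *	static struct percpu_rw_semaphore my_sem;
 *
 *	if (percpu_init_rwsem(&my_sem))
 *		return -ENOMEM;
 *	...
 *	percpu_free_rwsem(&my_sem);
 *
 * The return value must be checked because alloc_percpu() above can fail.
 */
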
void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
{
	/*
	 * XXX: temporary kludge. The error path in alloc_super()
	 * assumes that percpu_free_rwsem() is safe after kzalloc().
	 */
	if (!brw->fast_read_ctr)
		return;

	rcu_sync_dtor(&brw->rss);
	free_percpu(brw->fast_read_ctr);
	brw->fast_read_ctr = NULL; /* catch use-after-free bugs */
}
EXPORT_SYMBOL_GPL(percpu_free_rwsem);

/*
 * This is the fast-path for percpu_down_read/percpu_up_read. If it
 * succeeds, we rely on the barriers provided by rcu_sync_enter/exit;
 * see the comments in percpu_down_write() and percpu_up_write().
 *
 * If this helper fails, the callers fall back on the normal rw_semaphore
 * and atomic_dec_and_test(), so in that case we have the necessary
 * barriers anyway.
 */
static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
{
	bool success;

	preempt_disable();
	success = rcu_sync_is_idle(&brw->rss);
	if (likely(success))
		__this_cpu_add(*brw->fast_read_ctr, val);
	preempt_enable();

	return success;
}

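/*
 * Both directions go through this helper: percpu_down_read() passes
 * val == +1 and percpu_up_read() passes val == -1. The -1 becomes
 * UINT_MAX in the unsigned parameter, and __this_cpu_add() wraps it
 * back around to a decrement of the per-cpu int counter.
 */
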
/*
 * Like the normal down_read(), this is not recursive; a writer can
 * come in after the first percpu_down_read() and create a deadlock.
 *
 * Note: returns with lock_is_held(brw->rw_sem) == T for lockdep,
 * percpu_up_read() does rwsem_release(). This pairs with the usage
 * of ->rw_sem in percpu_down/up_write().
 */
void percpu_down_read(struct percpu_rw_semaphore *brw)
{
	might_sleep();
	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 0, _RET_IP_);

	if (likely(update_fast_ctr(brw, +1)))
		return;

	/* Avoid rwsem_acquire_read() and rwsem_release() */
	__down_read(&brw->rw_sem);
	atomic_inc(&brw->slow_read_ctr);
	__up_read(&brw->rw_sem);
}
EXPORT_SYMBOL_GPL(percpu_down_read);

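/*
 * A minimal read-side sketch, assuming a hypothetical "my_sem":
 *
 *	percpu_down_read(&my_sem);
 *	... read-side critical section, may sleep ...
 *	percpu_up_read(&my_sem);
 *
 * On the fast path this costs only a per-cpu increment with preemption
 * briefly disabled; no shared state is written unless a writer is
 * active or pending.
 */
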
int percpu_down_read_trylock(struct percpu_rw_semaphore *brw)
{
	if (unlikely(!update_fast_ctr(brw, +1))) {
		if (!__down_read_trylock(&brw->rw_sem))
			return 0;
		atomic_inc(&brw->slow_read_ctr);
		__up_read(&brw->rw_sem);
	}

	rwsem_acquire_read(&brw->rw_sem.dep_map, 0, 1, _RET_IP_);
	return 1;
}

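/*
 * The trylock form suits callers that must not wait for a writer to
 * finish; a hypothetical caller (sketch only, "my_sem" as above):
 *
 *	if (!percpu_down_read_trylock(&my_sem))
 *		return -EBUSY;
 *	...
 *	percpu_up_read(&my_sem);
 */
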
void percpu_up_read(struct percpu_rw_semaphore *brw)
{
	rwsem_release(&brw->rw_sem.dep_map, 1, _RET_IP_);

	if (likely(update_fast_ctr(brw, -1)))
		return;

	/* a false-positive wakeup is possible but harmless */
	if (atomic_dec_and_test(&brw->slow_read_ctr))
		wake_up_all(&brw->write_waitq);
}
EXPORT_SYMBOL_GPL(percpu_up_read);

static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(*brw->fast_read_ctr, cpu);
		per_cpu(*brw->fast_read_ctr, cpu) = 0;
	}

	return sum;
}

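/*
 * The per-cpu counters are only meaningful as a sum. For example, if a
 * reader does percpu_down_read() on CPU 0, migrates, and then does
 * percpu_up_read() on CPU 1, the counters hold +1 and -1 respectively;
 * clear_fast_ctr() still returns 0 for that reader, since the unsigned
 * arithmetic wraps correctly when the negative values are summed.
 */
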
void percpu_down_write(struct percpu_rw_semaphore *brw)
{
	/*
	 * Make rcu_sync_is_idle() == F and thus disable the fast-path in
	 * percpu_down_read() and percpu_up_read(), and wait for a grace
	 * period to pass.
	 *
	 * The latter synchronises us with the preceding readers which used
	 * the fast-path, so we cannot miss the result of __this_cpu_add()
	 * or anything else inside their critical sections.
	 */
	rcu_sync_enter(&brw->rss);

	/* exclude other writers, and block the new readers completely */
	down_write(&brw->rw_sem);

	/* nobody can use fast_read_ctr, move its sum into slow_read_ctr */
	atomic_add(clear_fast_ctr(brw), &brw->slow_read_ctr);

	/* wait for all readers to complete their percpu_up_read() */
	wait_event(brw->write_waitq, !atomic_read(&brw->slow_read_ctr));
}
EXPORT_SYMBOL_GPL(percpu_down_write);

void percpu_up_write(struct percpu_rw_semaphore *brw)
{
	/* release the lock, but the readers can't use the fast-path */
	up_write(&brw->rw_sem);
	/*
	 * Enable the fast-path in percpu_down_read() and percpu_up_read(),
	 * but only after another grace period passes; this adds the
	 * necessary barrier to ensure the reader can't miss the changes
	 * done by us.
	 */
	rcu_sync_exit(&brw->rss);
}
EXPORT_SYMBOL_GPL(percpu_up_write);
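
/*
 * A minimal write-side sketch, assuming a hypothetical "my_sem":
 *
 *	percpu_down_write(&my_sem);
 *	... exclusive section: all readers are drained and blocked ...
 *	percpu_up_write(&my_sem);
 *
 * Writers are expensive: percpu_down_write() blocks for an RCU-sched
 * grace period, and readers stay on the slow path until another grace
 * period completes after percpu_up_write(), so this lock favors
 * frequent, cheap reads over rare writes.
 */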