xref: /openbmc/linux/fs/btrfs/locking.c (revision d2999e1b)
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

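/*
 * Overview of the locking here:
 *
 * eb->lock is a plain rwlock.  A lock can be held in one of two modes:
 * "spinning", where the rwlock itself is held, or "blocking", where the
 * rwlock has been dropped and the hold is recorded in blocking_readers/
 * blocking_writers so that the holder may sleep.  Waiters sleep on
 * read_lock_wq/write_lock_wq until the blocking counts drop to zero.
 * The spinning_readers/spinning_writers and read_locks/write_locks
 * counters are used for sanity checks, and lock_owner/lock_nested
 * implement the nested read lock taken by a write lock owner.
 */
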
/*
 * If we currently hold a spinning read or write lock (indicated by the
 * rw flag), this bumps the count of blocking holders and drops the
 * spinning lock so that other tasks can sleep instead of spinning.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * No lock is required.  The lock owner may change if we have a
	 * read lock, but it won't change to or away from us.  If we have
	 * the write lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;
	if (rw == BTRFS_WRITE_LOCK) {
		if (atomic_read(&eb->blocking_writers) == 0) {
			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
			atomic_dec(&eb->spinning_writers);
			btrfs_assert_tree_locked(eb);
			atomic_inc(&eb->blocking_writers);
			write_unlock(&eb->lock);
		}
	} else if (rw == BTRFS_READ_LOCK) {
		btrfs_assert_tree_read_locked(eb);
		atomic_inc(&eb->blocking_readers);
		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
		atomic_dec(&eb->spinning_readers);
		read_unlock(&eb->lock);
	}
}

/*
 * If we currently hold a blocking read or write lock (indicated by the
 * rw flag), take the spinning lock again and drop our blocking count.
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
	/*
	 * No lock is required.  The lock owner may change if we have a
	 * read lock, but it won't change to or away from us.  If we have
	 * the write lock, we are the owner and it'll never change.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner)
		return;

	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
		write_lock(&eb->lock);
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_inc(&eb->spinning_writers);
		if (atomic_dec_and_test(&eb->blocking_writers) &&
		    waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
		read_lock(&eb->lock);
		atomic_inc(&eb->spinning_readers);
		if (atomic_dec_and_test(&eb->blocking_readers) &&
		    waitqueue_active(&eb->read_lock_wq))
			wake_up(&eb->read_lock_wq);
	}
}

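/*
 * Example (illustrative only, not used by btrfs): a holder typically takes
 * the spinning lock, converts it to blocking around work that may sleep,
 * and converts back before unlocking.
 */
static void __maybe_unused demo_blocking_conversion(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);			/* spinning write lock */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

	/* ... do work that may sleep ... */

	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);			/* drop the spinning lock */
}
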
/*
 * Take a spinning read lock.  This will wait for any blocking writers.
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
	/*
	 * If we already own the write lock and it is still spinning, the
	 * read_lock below would deadlock against ourselves.
	 */
	BUG_ON(!atomic_read(&eb->blocking_writers) &&
	       current->pid == eb->lock_owner);

	read_lock(&eb->lock);
	if (atomic_read(&eb->blocking_writers) &&
	    current->pid == eb->lock_owner) {
		/*
		 * This extent is already write-locked by our thread.  We allow
		 * an additional read lock to be added because it's for the same
		 * thread.  btrfs_find_all_roots() depends on this as it may be
		 * called on a partly (write-)locked tree.
		 */
		BUG_ON(eb->lock_nested);
		eb->lock_nested = 1;
		read_unlock(&eb->lock);
		return;
	}
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
}

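/*
 * Example (illustrative only, not used by btrfs): the nested case above
 * lets the thread that already holds the blocking write lock take and
 * drop an extra read lock on the same buffer.
 */
static void __maybe_unused demo_nested_read_lock(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);			/* we become lock_owner */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

	btrfs_tree_read_lock(eb);		/* takes the lock_nested path */
	btrfs_tree_read_unlock(eb);		/* only clears lock_nested */

	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);
}
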
/*
 * Returns 1 if we get the read lock and 0 if we don't.
 * This won't wait for blocking writers.
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers))
		return 0;

	if (!read_trylock(&eb->lock))
		return 0;

	/* a writer may have gone blocking after our first check; recheck */
	if (atomic_read(&eb->blocking_writers)) {
		read_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->read_locks);
	atomic_inc(&eb->spinning_readers);
	return 1;
}

/*
 * Returns 1 if we get the write lock and 0 if we don't.
 * This won't wait for blocking writers or readers.
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers))
		return 0;

	if (!write_trylock(&eb->lock))
		return 0;

	/* a holder may have gone blocking after our first check; recheck */
	if (atomic_read(&eb->blocking_writers) ||
	    atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		return 0;
	}
	atomic_inc(&eb->write_locks);
	atomic_inc(&eb->spinning_writers);
	eb->lock_owner = current->pid;
	return 1;
}

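/*
 * Example (illustrative only, not used by btrfs): the trylock variants
 * let a caller opportunistically take the lock and back off instead of
 * waiting for blocking holders.
 */
static int __maybe_unused demo_try_read(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_read_lock(eb))
		return 0;			/* caller backs off or retries */

	/* ... examine the buffer under the spinning read lock ... */

	btrfs_tree_read_unlock(eb);
	return 1;
}
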
/*
 * Drop a spinning read lock.
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
	/*
	 * If we're nested, we have the write lock.  No new locking is
	 * needed as long as we are the lock owner.  The write unlock will
	 * do a barrier for us, and the lock_nested field only matters to
	 * the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
	atomic_dec(&eb->spinning_readers);
	atomic_dec(&eb->read_locks);
	read_unlock(&eb->lock);
}

/*
 * Drop a blocking read lock.
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
	/*
	 * If we're nested, we have the write lock.  No new locking is
	 * needed as long as we are the lock owner.  The write unlock will
	 * do a barrier for us, and the lock_nested field only matters to
	 * the lock owner.
	 */
	if (eb->lock_nested && current->pid == eb->lock_owner) {
		eb->lock_nested = 0;
		return;
	}
	btrfs_assert_tree_read_locked(eb);
	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
	if (atomic_dec_and_test(&eb->blocking_readers) &&
	    waitqueue_active(&eb->read_lock_wq))
		wake_up(&eb->read_lock_wq);
	atomic_dec(&eb->read_locks);
}

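/*
 * Note that a read lock must be dropped with the variant matching its
 * current mode: btrfs_tree_read_unlock() while it is still spinning,
 * btrfs_tree_read_unlock_blocking() once it has been made blocking.
 */
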
/*
 * Take a spinning write lock.  This will wait for both blocking
 * readers and writers.
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
	write_lock(&eb->lock);
	if (atomic_read(&eb->blocking_readers)) {
		write_unlock(&eb->lock);
		wait_event(eb->read_lock_wq,
			   atomic_read(&eb->blocking_readers) == 0);
		goto again;
	}
	if (atomic_read(&eb->blocking_writers)) {
		write_unlock(&eb->lock);
		wait_event(eb->write_lock_wq,
			   atomic_read(&eb->blocking_writers) == 0);
		goto again;
	}
	WARN_ON(atomic_read(&eb->spinning_writers));
	atomic_inc(&eb->spinning_writers);
	atomic_inc(&eb->write_locks);
	eb->lock_owner = current->pid;
}

/*
 * Drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
	int blockers = atomic_read(&eb->blocking_writers);

	BUG_ON(blockers > 1);

	btrfs_assert_tree_locked(eb);
	eb->lock_owner = 0;
	atomic_dec(&eb->write_locks);

	if (blockers) {
		WARN_ON(atomic_read(&eb->spinning_writers));
		atomic_dec(&eb->blocking_writers);
		/* make sure the decrement is visible before the wakeup check */
		smp_mb();
		if (waitqueue_active(&eb->write_lock_wq))
			wake_up(&eb->write_lock_wq);
	} else {
		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
		atomic_dec(&eb->spinning_writers);
		write_unlock(&eb->lock);
	}
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->write_locks));
}

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
	BUG_ON(!atomic_read(&eb->read_locks));
}