--- locking.c (aa7eb8e78d8ecd6cd0475d86ea8385ff9cb47ece)
+++ locking.c (bd681513fa6f2ff29aa391f01e413a2d1c59fd77)
 /*
  * Copyright (C) 2008 Oracle. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License v2 as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it will be useful,

--- 10 unchanged lines hidden ---

 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 #include <linux/page-flags.h>
 #include <asm/bug.h>
 #include "ctree.h"
 #include "extent_io.h"
 #include "locking.h"

-static inline void spin_nested(struct extent_buffer *eb)
-{
-	spin_lock(&eb->lock);
-}
+void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

 /*
- * Setting a lock to blocking will drop the spinlock and set the
- * flag that forces other procs who want the lock to wait. After
- * this you can safely schedule with the lock held.
+ * if we currently have a spinning reader or writer lock
+ * (indicated by the rw flag) this will bump the count
+ * of blocking holders and drop the spinlock.
  */
-void btrfs_set_lock_blocking(struct extent_buffer *eb)
+void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
-		set_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
-		spin_unlock(&eb->lock);
+	if (rw == BTRFS_WRITE_LOCK) {
+		if (atomic_read(&eb->blocking_writers) == 0) {
+			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
+			atomic_dec(&eb->spinning_writers);
+			btrfs_assert_tree_locked(eb);
+			atomic_inc(&eb->blocking_writers);
+			write_unlock(&eb->lock);
+		}
+	} else if (rw == BTRFS_READ_LOCK) {
+		btrfs_assert_tree_read_locked(eb);
+		atomic_inc(&eb->blocking_readers);
+		WARN_ON(atomic_read(&eb->spinning_readers) == 0);
+		atomic_dec(&eb->spinning_readers);
+		read_unlock(&eb->lock);
 	}
-	/* exit with the spin lock released and the bit set */
+	return;
 }

 /*
- * clearing the blocking flag will take the spinlock again.
- * After this you can't safely schedule
+ * if we currently have a blocking lock, take the spinlock
+ * and drop our blocking count
  */
-void btrfs_clear_lock_blocking(struct extent_buffer *eb)
+void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-	if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
-		spin_nested(eb);
-		clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags);
-		smp_mb__after_clear_bit();
+	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
+		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
+		write_lock(&eb->lock);
+		WARN_ON(atomic_read(&eb->spinning_writers));
+		atomic_inc(&eb->spinning_writers);
+		if (atomic_dec_and_test(&eb->blocking_writers))
+			wake_up(&eb->write_lock_wq);
+	} else if (rw == BTRFS_READ_LOCK_BLOCKING) {
+		BUG_ON(atomic_read(&eb->blocking_readers) == 0);
+		read_lock(&eb->lock);
+		atomic_inc(&eb->spinning_readers);
+		if (atomic_dec_and_test(&eb->blocking_readers))
+			wake_up(&eb->read_lock_wq);
 	}
-	/* exit with the spin lock held */
+	return;
 }

 /*
- * unfortunately, many of the places that currently set a lock to blocking
- * don't end up blocking for very long, and often they don't block
- * at all. For a dbench 50 run, if we don't spin on the blocking bit
- * at all, the context switch rate can jump up to 400,000/sec or more.
- *
- * So, we're still stuck with this crummy spin on the blocking bit,
- * at least until the most common causes of the short blocks
- * can be dealt with.
+ * take a spinning read lock. This will wait for any blocking
+ * writers
  */
-static int btrfs_spin_on_block(struct extent_buffer *eb)
+void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
-	int i;
-
-	for (i = 0; i < 512; i++) {
-		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-			return 1;
-		if (need_resched())
-			break;
-		cpu_relax();
+again:
+	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers)) {
+		read_unlock(&eb->lock);
+		wait_event(eb->write_lock_wq,
+			   atomic_read(&eb->blocking_writers) == 0);
+		goto again;
 	}
-	return 0;
+	atomic_inc(&eb->read_locks);
+	atomic_inc(&eb->spinning_readers);
 }

 /*
- * This is somewhat different from trylock. It will take the
- * spinlock but if it finds the lock is set to blocking, it will
- * return without the lock held.
- *
- * returns 1 if it was able to take the lock and zero otherwise
- *
- * After this call, scheduling is not safe without first calling
- * btrfs_set_lock_blocking()
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers
  */
-int btrfs_try_spin_lock(struct extent_buffer *eb)
+int btrfs_try_tree_read_lock(struct extent_buffer *eb)
 {
-	int i;
+	if (atomic_read(&eb->blocking_writers))
+		return 0;

-	if (btrfs_spin_on_block(eb)) {
-		spin_nested(eb);
-		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-			return 1;
-		spin_unlock(&eb->lock);
+	read_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers)) {
+		read_unlock(&eb->lock);
+		return 0;
 	}
-	/* spin for a bit on the BLOCKING flag */
-	for (i = 0; i < 2; i++) {
-		cpu_relax();
-		if (!btrfs_spin_on_block(eb))
-			break;
-
-		spin_nested(eb);
-		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-			return 1;
-		spin_unlock(&eb->lock);
-	}
-	return 0;
+	atomic_inc(&eb->read_locks);
+	atomic_inc(&eb->spinning_readers);
+	return 1;
 }

+/*
+ * returns 1 if we get the read lock and 0 if we don't
+ * this won't wait for blocking writers or readers
+ */
+int btrfs_try_tree_write_lock(struct extent_buffer *eb)
+{
+	if (atomic_read(&eb->blocking_writers) ||
+	    atomic_read(&eb->blocking_readers))
+		return 0;
+	write_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_writers) ||
+	    atomic_read(&eb->blocking_readers)) {
+		write_unlock(&eb->lock);
+		return 0;
+	}
+	atomic_inc(&eb->write_locks);
+	atomic_inc(&eb->spinning_writers);
+	return 1;
+}
+
+/*
+ * drop a spinning read lock
+ */
+void btrfs_tree_read_unlock(struct extent_buffer *eb)
+{
+	btrfs_assert_tree_read_locked(eb);
+	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
+	atomic_dec(&eb->spinning_readers);
+	atomic_dec(&eb->read_locks);
+	read_unlock(&eb->lock);
+}
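
The hunks above define the read side of the new API. For context, the intended calling pattern looks roughly like the following sketch (a hypothetical caller, not part of this diff; walk_node is an invented name): take the spinning read lock, convert it to blocking before doing anything that can schedule, convert it back, then drop it.

static void walk_node(struct extent_buffer *eb)
{
	/* spinning read lock; waits until no blocking writers remain */
	btrfs_tree_read_lock(eb);

	/* convert to a blocking read lock: safe to schedule after this */
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	/* ... work that may sleep ... */

	/* convert back to a spinning read lock, then drop it */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_READ_LOCK_BLOCKING);
	btrfs_tree_read_unlock(eb);
}

A reader still in the blocking state would instead call btrfs_tree_read_unlock_blocking(), introduced below.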

 /*
- * the autoremove wake function will return 0 if it tried to wake up
- * a process that was already awake, which means that process won't
- * count as an exclusive wakeup. The waitq code will continue waking
- * procs until it finds one that was actually sleeping.
- *
- * For btrfs, this isn't quite what we want. We want a single proc
- * to be notified that the lock is ready for taking. If that proc
- * already happen to be awake, great, it will loop around and try for
- * the lock.
- *
- * So, btrfs_wake_function always returns 1, even when the proc that we
- * tried to wake up was already awake.
+ * drop a blocking read lock
  */
-static int btrfs_wake_function(wait_queue_t *wait, unsigned mode,
-			       int sync, void *key)
+void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
-	autoremove_wake_function(wait, mode, sync, key);
-	return 1;
+	btrfs_assert_tree_read_locked(eb);
+	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
+	if (atomic_dec_and_test(&eb->blocking_readers))
+		wake_up(&eb->read_lock_wq);
+	atomic_dec(&eb->read_locks);
 }

 /*
- * returns with the extent buffer spinlocked.
- *
- * This will spin and/or wait as required to take the lock, and then
- * return with the spinlock held.
- *
- * After this call, scheduling is not safe without first calling
- * btrfs_set_lock_blocking()
+ * take a spinning write lock. This will wait for both
+ * blocking readers or writers
  */
 int btrfs_tree_lock(struct extent_buffer *eb)
 {
-	DEFINE_WAIT(wait);
-	wait.func = btrfs_wake_function;
-
-	if (!btrfs_spin_on_block(eb))
-		goto sleep;
-
-	while(1) {
-		spin_nested(eb);
-
-		/* nobody is blocking, exit with the spinlock held */
-		if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-			return 0;
-
-		/*
-		 * we have the spinlock, but the real owner is blocking.
-		 * wait for them
-		 */
-		spin_unlock(&eb->lock);
-
-		/*
-		 * spin for a bit, and if the blocking flag goes away,
-		 * loop around
-		 */
-		cpu_relax();
-		if (btrfs_spin_on_block(eb))
-			continue;
-sleep:
-		prepare_to_wait_exclusive(&eb->lock_wq, &wait,
-					  TASK_UNINTERRUPTIBLE);
-
-		if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-			schedule();
-
-		finish_wait(&eb->lock_wq, &wait);
+again:
+	wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
+	wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
+	write_lock(&eb->lock);
+	if (atomic_read(&eb->blocking_readers)) {
+		write_unlock(&eb->lock);
+		wait_event(eb->read_lock_wq,
+			   atomic_read(&eb->blocking_readers) == 0);
+		goto again;
 	}
+	if (atomic_read(&eb->blocking_writers)) {
+		write_unlock(&eb->lock);
+		wait_event(eb->write_lock_wq,
+			   atomic_read(&eb->blocking_writers) == 0);
+		goto again;
+	}
+	WARN_ON(atomic_read(&eb->spinning_writers));
+	atomic_inc(&eb->spinning_writers);
+	atomic_inc(&eb->write_locks);
 	return 0;
 }

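Note how the new btrfs_tree_lock() rechecks both blocking counters after write_lock() succeeds: a current holder can convert its lock to blocking between the unlocked wait_event() and the rwlock acquisition, so the only safe answer is to drop the lock and wait again. A minimal userspace analogue of this wait/lock/recheck/retry pattern, assuming C11 atomics and pthreads (demo_lock and demo_write_lock are invented names, not from the kernel):

#include <pthread.h>
#include <stdatomic.h>

struct demo_lock {
	pthread_rwlock_t lock;
	atomic_int blocking_writers;	/* analogue of eb->blocking_writers */
	pthread_mutex_t wq_mutex;	/* these two stand in for the kernel */
	pthread_cond_t wq;		/* wait queue eb->write_lock_wq */
};

static void demo_write_lock(struct demo_lock *l)
{
again:
	/* wait outside the lock until no blocking writers remain;
	 * whoever drops the last blocking reference must broadcast
	 * l->wq, as wake_up() does for eb->write_lock_wq above */
	pthread_mutex_lock(&l->wq_mutex);
	while (atomic_load(&l->blocking_writers))
		pthread_cond_wait(&l->wq, &l->wq_mutex);
	pthread_mutex_unlock(&l->wq_mutex);

	pthread_rwlock_wrlock(&l->lock);
	/* recheck: a holder may have gone blocking in the window
	 * between the condvar wait and taking the rwlock */
	if (atomic_load(&l->blocking_writers)) {
		pthread_rwlock_unlock(&l->lock);
		goto again;
	}
	/* rwlock held and no blocking holders: write lock acquired */
}
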
+/*
+ * drop a spinning or a blocking write lock.
+ */
 int btrfs_tree_unlock(struct extent_buffer *eb)
 {
-	/*
-	 * if we were a blocking owner, we don't have the spinlock held
-	 * just clear the bit and look for waiters
-	 */
-	if (test_and_clear_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-		smp_mb__after_clear_bit();
-	else
-		spin_unlock(&eb->lock);
+	int blockers = atomic_read(&eb->blocking_writers);

-	if (waitqueue_active(&eb->lock_wq))
-		wake_up(&eb->lock_wq);
+	BUG_ON(blockers > 1);
+
+	btrfs_assert_tree_locked(eb);
+	atomic_dec(&eb->write_locks);
+
+	if (blockers) {
+		WARN_ON(atomic_read(&eb->spinning_writers));
+		atomic_dec(&eb->blocking_writers);
+		smp_wmb();
+		wake_up(&eb->write_lock_wq);
+	} else {
+		WARN_ON(atomic_read(&eb->spinning_writers) != 1);
+		atomic_dec(&eb->spinning_writers);
+		write_unlock(&eb->lock);
+	}
 	return 0;
 }

 void btrfs_assert_tree_locked(struct extent_buffer *eb)
 {
-	if (!test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags))
-		assert_spin_locked(&eb->lock);
+	BUG_ON(!atomic_read(&eb->write_locks));
 }
+
+void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
+{
+	BUG_ON(!atomic_read(&eb->read_locks));
+}
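
Taken together, the new scheme replaces the single EXTENT_BUFFER_BLOCKING bit and the custom wait-queue spinning with a kernel rwlock plus per-state counters, and it adds a shared read path that the old exclusive spinlock could not express. A hypothetical writer-side caller (illustrative only; modify_node is an invented name) would look like:

static void modify_node(struct extent_buffer *eb)
{
	/* spinning write lock; waits out blocking readers and writers */
	btrfs_tree_lock(eb);

	/* convert to blocking: safe to schedule after this */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

	/* ... COW or IO that may sleep ... */

	/* convert back and release on the spinning fast path */
	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);
}

The clear step is optional here: as the blockers branch in btrfs_tree_unlock() shows, a write lock can also be released directly from the blocking state.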