/* fs/btrfs/extent_map.c (revision b888db2bd7b67f190b32934e6a86181f262ac3ec) */
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include "extent_map.h"

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

/* bits for the extent state */
#define EXTENT_DIRTY 1
#define EXTENT_WRITEBACK (1 << 1)
#define EXTENT_UPTODATE (1 << 2)
#define EXTENT_LOCKED (1 << 3)
#define EXTENT_NEW (1 << 4)
#define EXTENT_DELALLOC (1 << 5)

#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)

void __init extent_map_init(void)
{
	extent_map_cache = kmem_cache_create("extent_map",
					    sizeof(struct extent_map), 0,
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_DESTROY_BY_RCU,
					    NULL);
	extent_state_cache = kmem_cache_create("extent_state",
					    sizeof(struct extent_state), 0,
					    SLAB_RECLAIM_ACCOUNT |
					    SLAB_DESTROY_BY_RCU,
					    NULL);
}

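/*
 * Illustrative only (not in the original file): a filesystem using this
 * code would typically create the caches at module load and tear them
 * down at unload.  'my_fs_init', 'my_fs_exit' and 'my_fs_type' are
 * hypothetical names for the example:
 *
 *	static int __init my_fs_init(void)
 *	{
 *		extent_map_init();
 *		return register_filesystem(&my_fs_type);
 *	}
 *
 *	static void __exit my_fs_exit(void)
 *	{
 *		unregister_filesystem(&my_fs_type);
 *		extent_map_exit();
 *	}
 */
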
void __exit extent_map_exit(void)
{
	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->fill_delalloc = NULL;
	rwlock_init(&tree->lock);
	tree->mapping = mapping;
}
EXPORT_SYMBOL(extent_map_tree_init);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);

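/*
 * Illustrative only (not in the original file): a minimal sketch of the
 * extent_map life cycle.  The tree is embedded in some per-inode state;
 * 'inode', 'block' and the error handling are placeholders:
 *
 *	struct extent_map_tree tree;
 *	struct extent_map *em;
 *
 *	extent_map_tree_init(&tree, inode->i_mapping, GFP_NOFS);
 *	em = alloc_extent_map(GFP_NOFS);
 *	if (!em)
 *		return -ENOMEM;
 *	em->start = 0;
 *	em->end = 4095;
 *	em->block_start = block;
 *	em->block_end = block + 4095;
 *	...
 *	free_extent_map(em);
 *
 * The final free_extent_map() drops the reference taken by
 * alloc_extent_map(); the object is only freed once every holder has
 * dropped its reference.
 */
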
struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

/*
 * insert 'node' into 'root', keyed by 'offset' (the end of the range it
 * covers).  Returns the existing node if the offset is already covered,
 * or NULL on success.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while(*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * search 'root' for an entry whose range contains 'offset'.  If none is
 * found and prev_ret is non-NULL, *prev_ret is set to the first entry
 * that ends at or after 'offset' (or NULL if there is none).
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				   struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while(n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while(prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

/*
 * remove the entry covering 'offset' from 'root', if any
 */
static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

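/*
 * Illustrative only (not in the original file): both trees key each
 * node by the *end* offset of its range, so a search for any offset
 * inside [start, end] lands on that node.  For example, with entries
 * [0, 4095] and [4096, 8191] in the tree:
 *
 *	tree_search(root, 0)	returns the [0, 4095] node
 *	tree_search(root, 4095)	returns the [0, 4095] node
 *	tree_search(root, 5000)	returns the [4096, 8191] node
 *	tree_search(root, 9000)	returns NULL (nothing ends at or after it)
 */
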
/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings.  The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == 0 && prev->block_start == 0) ||
		     (em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

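/*
 * Illustrative only (not in the original file): a sketch of inserting a
 * mapping and handling a collision.  add_extent_mapping() takes its own
 * reference, so the caller still drops the one from alloc_extent_map().
 * 'start', 'len' and 'block_start' are placeholders:
 *
 *	em->start = start;
 *	em->end = start + len - 1;
 *	em->block_start = block_start;
 *	em->block_end = block_start + len - 1;
 *	ret = add_extent_mapping(tree, em);
 *	if (ret == -EEXIST) {
 *		free_extent_map(em);
 *		em = lookup_extent_mapping(tree, start, start + len - 1);
 *	}
 *	...
 *	free_extent_map(em);
 */
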
/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range.  There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);

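/*
 * Illustrative only (not in the original file): the lookup returns the
 * mapping with an extra reference held, so every successful call must
 * be paired with free_extent_map():
 *
 *	em = lookup_extent_mapping(tree, start, end);
 *	if (em && !IS_ERR(em)) {
 *		... use em->start, em->end, em->block_start ...
 *		free_extent_map(em);
 *	}
 */
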
/*
 * removes an extent_map struct from the tree.  No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS set in their state
 * field are not merged because the end_io handlers need to be able to
 * do operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	if ((end & 4095) == 0) {
		printk("insert state %Lu %Lu strange end\n", start, end);
		WARN_ON(1);
	}
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;
	if ((prealloc->end & 4095) == 0) {
		printk("insert state %Lu %Lu strange end\n", prealloc->start,
		       prealloc->end);
		WARN_ON(1);
	}
	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			    struct extent_state *state, int bits, int wake,
			    int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (i.e. for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irq(&tree->lock);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irq(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start >= end)
		goto out;
	write_unlock_irq(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

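/*
 * Illustrative only (not in the original file): clearing bits is how
 * ranges are torn down, e.g. dropping all state for a truncated range
 * (-1 clears every bit; wake sleepers and delete the states outright):
 *
 *	clear_extent_bit(tree, start, end, -1, 1, 1, GFP_NOFS);
 *
 * or just clearing dirty accounting for a range that was written:
 *
 *	clear_extent_dirty(tree, start, end, GFP_NOFS);
 */
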
static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree.  This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set.  The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	goto search_again;

out:
	write_unlock_irq(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irq(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

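/*
 * Illustrative only (not in the original file): the exclusive mode is
 * what lock_extent() below builds on.  A failed exclusive set reports
 * where the conflict starts so the caller can wait and retry:
 *
 *	u64 failed_start;
 *	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
 *			     &failed_start, GFP_NOFS);
 *	if (err == -EEXIST)
 *		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
 */
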
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way.  [start, end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);

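/*
 * Illustrative only (not in the original file): the common pattern is
 * to lock a byte range around an IO, do the work, then unlock:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... read or modify the pages covering [start, end] ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 *
 * With a blocking gfp mask, lock_extent() only returns once the whole
 * range is held, so the unlock must always follow.
 */
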
/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

/*
 * find a run of contiguous delalloc extents starting exactly at 'start',
 * lock any of them that fall at or beyond 'lock_start', and return the
 * last byte of the run via 'end'.  Returns the number of delalloc
 * extents found, stopping once max_bytes have been accumulated.
 */
u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end, u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node))
		goto out;

	while(1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start)
			goto out;
		if (!(state->state & EXTENT_DELALLOC))
			goto out;
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				/* queue ourselves before dropping the lock,
				 * mirroring wait_on_state() above */
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

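/*
 * Illustrative only (not in the original file): a sketch of how a
 * writepage path might consume this.  The fill_delalloc signature is an
 * assumption here (the callback is only initialized to NULL in this
 * file), and 'inode' is a placeholder:
 *
 *	u64 delalloc_end = 0;
 *	u64 found;
 *
 *	found = find_lock_delalloc_range(tree, start, start,
 *					 &delalloc_end, 64 * 1024 * 1024);
 *	if (found && tree->fill_delalloc)
 *		tree->fill_delalloc(inode, start, delalloc_end);
 */
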
/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
static int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
			  int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static int end_bio_extent_writepage(struct bio *bio,
				   unsigned int bytes_done, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

	if (bio->bi_size)
		return 1;

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
	return 0;
}

1174a52d9a80SChris Mason /*
1175a52d9a80SChris Mason  * after a readpage IO is done, we need to:
1176a52d9a80SChris Mason  * clear the uptodate bits on error
1177a52d9a80SChris Mason  * set the uptodate bits if things worked
1178a52d9a80SChris Mason  * set the page up to date if all extents in the tree are uptodate
1179a52d9a80SChris Mason  * clear the lock bit in the extent tree
1180a52d9a80SChris Mason  * unlock the page if there are no other extents locked for it
1181a52d9a80SChris Mason  *
1182a52d9a80SChris Mason  * Scheduling is not allowed, so the extent state tree is expected
1183a52d9a80SChris Mason  * to have one and only one object corresponding to this IO.
1184a52d9a80SChris Mason  */
1185a52d9a80SChris Mason static int end_bio_extent_readpage(struct bio *bio,
1186a52d9a80SChris Mason 				   unsigned int bytes_done, int err)
1187a52d9a80SChris Mason {
1188a52d9a80SChris Mason 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1189a52d9a80SChris Mason 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1190a52d9a80SChris Mason 	struct extent_map_tree *tree = bio->bi_private;
1191a52d9a80SChris Mason 	u64 start;
1192a52d9a80SChris Mason 	u64 end;
1193a52d9a80SChris Mason 	int whole_page;
1194a52d9a80SChris Mason 
1195a52d9a80SChris Mason 	if (bio->bi_size)
1196a52d9a80SChris Mason 		return 1;
1197a52d9a80SChris Mason 
1198a52d9a80SChris Mason 	do {
1199a52d9a80SChris Mason 		struct page *page = bvec->bv_page;
1200a52d9a80SChris Mason 		start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1201a52d9a80SChris Mason 		end = start + bvec->bv_len - 1;
1202a52d9a80SChris Mason 
1203a52d9a80SChris Mason 		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1204a52d9a80SChris Mason 			whole_page = 1;
1205a52d9a80SChris Mason 		else
1206a52d9a80SChris Mason 			whole_page = 0;
1207a52d9a80SChris Mason 
1208a52d9a80SChris Mason 		if (--bvec >= bio->bi_io_vec)
1209a52d9a80SChris Mason 			prefetchw(&bvec->bv_page->flags);
1210a52d9a80SChris Mason 
1211a52d9a80SChris Mason 		if (uptodate) {
1212a52d9a80SChris Mason 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1213a52d9a80SChris Mason 			if (whole_page)
1214a52d9a80SChris Mason 				SetPageUptodate(page);
1215a52d9a80SChris Mason 			else
1216a52d9a80SChris Mason 				check_page_uptodate(tree, page);
1217a52d9a80SChris Mason 		} else {
1218a52d9a80SChris Mason 			ClearPageUptodate(page);
1219a52d9a80SChris Mason 			SetPageError(page);
1220a52d9a80SChris Mason 		}
1221a52d9a80SChris Mason 
1222a52d9a80SChris Mason 		unlock_extent(tree, start, end, GFP_ATOMIC);
1223a52d9a80SChris Mason 
1224a52d9a80SChris Mason 		if (whole_page)
1225a52d9a80SChris Mason 			unlock_page(page);
1226a52d9a80SChris Mason 		else
1227a52d9a80SChris Mason 			check_page_locked(tree, page);
1228a52d9a80SChris Mason 	} while (bvec >= bio->bi_io_vec);
1229a52d9a80SChris Mason 
1230a52d9a80SChris Mason 	bio_put(bio);
1231a52d9a80SChris Mason 	return 0;
1232a52d9a80SChris Mason }
1233a52d9a80SChris Mason 
1234a52d9a80SChris Mason /*
1235a52d9a80SChris Mason  * IO done from prepare_write is pretty simple: we just unlock
1236a52d9a80SChris Mason  * the structs in the extent tree when done, and set the uptodate bits
1237a52d9a80SChris Mason  * as appropriate.
1238a52d9a80SChris Mason  */
1239a52d9a80SChris Mason static int end_bio_extent_preparewrite(struct bio *bio,
1240a52d9a80SChris Mason 				       unsigned int bytes_done, int err)
1241a52d9a80SChris Mason {
1242a52d9a80SChris Mason 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1243a52d9a80SChris Mason 	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1244a52d9a80SChris Mason 	struct extent_map_tree *tree = bio->bi_private;
1245a52d9a80SChris Mason 	u64 start;
1246a52d9a80SChris Mason 	u64 end;
1247a52d9a80SChris Mason 
1248a52d9a80SChris Mason 	if (bio->bi_size)
1249a52d9a80SChris Mason 		return 1;
1250a52d9a80SChris Mason 
1251a52d9a80SChris Mason 	do {
1252a52d9a80SChris Mason 		struct page *page = bvec->bv_page;
1253a52d9a80SChris Mason 		start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
1254a52d9a80SChris Mason 		end = start + bvec->bv_len - 1;
1255a52d9a80SChris Mason 
1256a52d9a80SChris Mason 		if (--bvec >= bio->bi_io_vec)
1257a52d9a80SChris Mason 			prefetchw(&bvec->bv_page->flags);
1258a52d9a80SChris Mason 
1259a52d9a80SChris Mason 		if (uptodate) {
1260a52d9a80SChris Mason 			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1261a52d9a80SChris Mason 		} else {
1262a52d9a80SChris Mason 			ClearPageUptodate(page);
1263a52d9a80SChris Mason 			SetPageError(page);
1264a52d9a80SChris Mason 		}
1265a52d9a80SChris Mason 
1266a52d9a80SChris Mason 		unlock_extent(tree, start, end, GFP_ATOMIC);
1267a52d9a80SChris Mason 
1268a52d9a80SChris Mason 	} while (bvec >= bio->bi_io_vec);
1269a52d9a80SChris Mason 
1270a52d9a80SChris Mason 	bio_put(bio);
1271a52d9a80SChris Mason 	return 0;
1272a52d9a80SChris Mason }
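
/*
 * The three completion handlers above walk the bio_vec array backwards
 * with the same idiom; a minimal sketch of just the loop shape, where
 * finish_one_page() is a hypothetical stand-in for the per-handler
 * page work:
 *
 *	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 *	do {
 *		struct page *page = bvec->bv_page;
 *		if (--bvec >= bio->bi_io_vec)
 *			prefetchw(&bvec->bv_page->flags);
 *		finish_one_page(page);
 *	} while (bvec >= bio->bi_io_vec);
 *
 * Prefetching the previous vec's page flags keeps the struct page
 * cache-hot for the next loop iteration.
 */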
1273a52d9a80SChris Mason 
1274a52d9a80SChris Mason static int submit_extent_page(int rw, struct extent_map_tree *tree,
1275a52d9a80SChris Mason 			      struct page *page, sector_t sector,
1276a52d9a80SChris Mason 			      size_t size, unsigned long offset,
1277a52d9a80SChris Mason 			      struct block_device *bdev,
1278a52d9a80SChris Mason 			      bio_end_io_t end_io_func)
1279a52d9a80SChris Mason {
1280a52d9a80SChris Mason 	struct bio *bio;
1281a52d9a80SChris Mason 	int ret = 0;
1282a52d9a80SChris Mason 
1283a52d9a80SChris Mason 	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;
1284a52d9a80SChris Mason 
1285a52d9a80SChris Mason 	bio->bi_sector = sector;
1286a52d9a80SChris Mason 	bio->bi_bdev = bdev;
1287a52d9a80SChris Mason 	bio->bi_io_vec[0].bv_page = page;
1288a52d9a80SChris Mason 	bio->bi_io_vec[0].bv_len = size;
1289a52d9a80SChris Mason 	bio->bi_io_vec[0].bv_offset = offset;
1290a52d9a80SChris Mason 
1291a52d9a80SChris Mason 	bio->bi_vcnt = 1;
1292a52d9a80SChris Mason 	bio->bi_idx = 0;
1293a52d9a80SChris Mason 	bio->bi_size = size;
1294a52d9a80SChris Mason 
1295a52d9a80SChris Mason 	bio->bi_end_io = end_io_func;
1296a52d9a80SChris Mason 	bio->bi_private = tree;
1297a52d9a80SChris Mason 
1298a52d9a80SChris Mason 	bio_get(bio);
1299a52d9a80SChris Mason 	submit_bio(rw, bio);
1300a52d9a80SChris Mason 
1301a52d9a80SChris Mason 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
1302a52d9a80SChris Mason 		ret = -EOPNOTSUPP;
1303a52d9a80SChris Mason 
1304a52d9a80SChris Mason 	bio_put(bio);
1305a52d9a80SChris Mason 	return ret;
1306a52d9a80SChris Mason }
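
/*
 * Hedged usage sketch for submit_extent_page() (illustration only; the
 * read_one_page() wrapper is hypothetical): queue a READ covering one
 * whole page and let end_bio_extent_readpage() clean up.  The caller is
 * expected to hold a locked extent range for the page, since the end_io
 * handler is what clears the lock bits.
 *
 *	static int read_one_page(struct extent_map_tree *tree,
 *				 struct page *page, sector_t sector,
 *				 struct block_device *bdev)
 *	{
 *		return submit_extent_page(READ, tree, page, sector,
 *					  PAGE_CACHE_SIZE, 0, bdev,
 *					  end_bio_extent_readpage);
 *	}
 */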
1307a52d9a80SChris Mason 
1308a52d9a80SChris Mason /*
1309a52d9a80SChris Mason  * basic readpage implementation.  Locked extent state structs are
1310a52d9a80SChris Mason  * inserted into the tree; they are removed when the IO is done by the
1311a52d9a80SChris Mason  * end_io handlers.
1312a52d9a80SChris Mason  */
1313a52d9a80SChris Mason int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1314a52d9a80SChris Mason 			  get_extent_t *get_extent)
1315a52d9a80SChris Mason {
1316a52d9a80SChris Mason 	struct inode *inode = page->mapping->host;
1317a52d9a80SChris Mason 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1318a52d9a80SChris Mason 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1319a52d9a80SChris Mason 	u64 end;
1320a52d9a80SChris Mason 	u64 cur = start;
1321a52d9a80SChris Mason 	u64 extent_offset;
1322a52d9a80SChris Mason 	u64 last_byte = i_size_read(inode);
1323a52d9a80SChris Mason 	u64 block_start;
1324a52d9a80SChris Mason 	u64 cur_end;
1325a52d9a80SChris Mason 	sector_t sector;
1326a52d9a80SChris Mason 	struct extent_map *em;
1327a52d9a80SChris Mason 	struct block_device *bdev;
1328a52d9a80SChris Mason 	int ret;
1329a52d9a80SChris Mason 	int nr = 0;
1330a52d9a80SChris Mason 	size_t page_offset = 0;
1331a52d9a80SChris Mason 	size_t iosize;
1332a52d9a80SChris Mason 	size_t blocksize = inode->i_sb->s_blocksize;
1333a52d9a80SChris Mason 
1334a52d9a80SChris Mason 	if (!PagePrivate(page)) {
1335a52d9a80SChris Mason 		SetPagePrivate(page);
1336a52d9a80SChris Mason 		set_page_private(page, 1);
1337*b888db2bSChris Mason 		WARN_ON(!page->mapping->a_ops->invalidatepage);
1338a52d9a80SChris Mason 		page_cache_get(page);
1339a52d9a80SChris Mason 	}
1340a52d9a80SChris Mason 
1341a52d9a80SChris Mason 	end = page_end;
1342a52d9a80SChris Mason 	lock_extent(tree, start, end, GFP_NOFS);
1343a52d9a80SChris Mason 
1344a52d9a80SChris Mason 	while (cur <= end) {
1345a52d9a80SChris Mason 		if (cur >= last_byte) {
1346a52d9a80SChris Mason 			iosize = PAGE_CACHE_SIZE - page_offset;
1347a52d9a80SChris Mason 			zero_user_page(page, page_offset, iosize, KM_USER0);
1348a52d9a80SChris Mason 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1349a52d9a80SChris Mason 					    GFP_NOFS);
1350a52d9a80SChris Mason 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1351a52d9a80SChris Mason 			break;
1352a52d9a80SChris Mason 		}
1353a52d9a80SChris Mason 		em = get_extent(inode, page, page_offset, cur, end, 0);
1354a52d9a80SChris Mason 		if (IS_ERR(em) || !em) {
1355a52d9a80SChris Mason 			SetPageError(page);
1356a52d9a80SChris Mason 			unlock_extent(tree, cur, end, GFP_NOFS);
1357a52d9a80SChris Mason 			break;
1358a52d9a80SChris Mason 		}
1359a52d9a80SChris Mason 
1360a52d9a80SChris Mason 		extent_offset = cur - em->start;
1361a52d9a80SChris Mason 		BUG_ON(em->end < cur);
1362a52d9a80SChris Mason 		BUG_ON(end < cur);
1363a52d9a80SChris Mason 
1364a52d9a80SChris Mason 		iosize = min(em->end - cur, end - cur) + 1;
1365a52d9a80SChris Mason 		cur_end = min(em->end, end);
1366a52d9a80SChris Mason 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1367a52d9a80SChris Mason 		sector = (em->block_start + extent_offset) >> 9;
1368a52d9a80SChris Mason 		bdev = em->bdev;
1369a52d9a80SChris Mason 		block_start = em->block_start;
1370a52d9a80SChris Mason 		free_extent_map(em);
1371a52d9a80SChris Mason 		em = NULL;
1372a52d9a80SChris Mason 
1373a52d9a80SChris Mason 		/* we've found a hole, just zero and go on */
1374a52d9a80SChris Mason 		if (block_start == 0) {
1375a52d9a80SChris Mason 			zero_user_page(page, page_offset, iosize, KM_USER0);
1376a52d9a80SChris Mason 			set_extent_uptodate(tree, cur, cur + iosize - 1,
1377a52d9a80SChris Mason 					    GFP_NOFS);
1378a52d9a80SChris Mason 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1379a52d9a80SChris Mason 			cur = cur + iosize;
1380a52d9a80SChris Mason 			page_offset += iosize;
1381a52d9a80SChris Mason 			continue;
1382a52d9a80SChris Mason 		}
1383a52d9a80SChris Mason 		/* the get_extent function already copied into the page */
1384a52d9a80SChris Mason 		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1385a52d9a80SChris Mason 			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1386a52d9a80SChris Mason 			cur = cur + iosize;
1387a52d9a80SChris Mason 			page_offset += iosize;
1388a52d9a80SChris Mason 			continue;
1389a52d9a80SChris Mason 		}
1390a52d9a80SChris Mason 
1391a52d9a80SChris Mason 		ret = submit_extent_page(READ, tree, page,
1392a52d9a80SChris Mason 					 sector, iosize, page_offset, bdev,
1393a52d9a80SChris Mason 					 end_bio_extent_readpage);
1394a52d9a80SChris Mason 		if (ret)
1395a52d9a80SChris Mason 			SetPageError(page);
1396a52d9a80SChris Mason 		cur = cur + iosize;
1397a52d9a80SChris Mason 		page_offset += iosize;
1398a52d9a80SChris Mason 		nr++;
1399a52d9a80SChris Mason 	}
1400a52d9a80SChris Mason 	if (!nr) {
1401a52d9a80SChris Mason 		if (!PageError(page))
1402a52d9a80SChris Mason 			SetPageUptodate(page);
1403a52d9a80SChris Mason 		unlock_page(page);
1404a52d9a80SChris Mason 	}
1405a52d9a80SChris Mason 	return 0;
1406a52d9a80SChris Mason }
1407a52d9a80SChris Mason EXPORT_SYMBOL(extent_read_full_page);
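
/*
 * Hedged usage sketch (not part of this file): a filesystem wires
 * extent_read_full_page() into its address_space_operations roughly as
 * below.  myfs_tree() and myfs_get_extent() are hypothetical
 * placeholders for the caller's per-inode extent_map_tree accessor and
 * its get_extent_t callback.
 *
 *	static int myfs_readpage(struct file *file, struct page *page)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = myfs_tree(page->mapping->host);
 *		return extent_read_full_page(tree, page, myfs_get_extent);
 *	}
 */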
1408a52d9a80SChris Mason 
1409a52d9a80SChris Mason /*
1410a52d9a80SChris Mason  * the writepage semantics are similar to regular writepage.  extent
1411a52d9a80SChris Mason  * records are inserted to lock ranges in the tree, and as dirty areas
1412a52d9a80SChris Mason  * are found, they are marked writeback.  Then the lock bits are removed
1413a52d9a80SChris Mason  * and the end_io handler clears the writeback ranges
1414a52d9a80SChris Mason  */
1415a52d9a80SChris Mason int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
1416a52d9a80SChris Mason 			  get_extent_t *get_extent,
1417a52d9a80SChris Mason 			  struct writeback_control *wbc)
1418a52d9a80SChris Mason {
1419a52d9a80SChris Mason 	struct inode *inode = page->mapping->host;
1420a52d9a80SChris Mason 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1421a52d9a80SChris Mason 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
1422a52d9a80SChris Mason 	u64 end;
1423a52d9a80SChris Mason 	u64 cur = start;
1424a52d9a80SChris Mason 	u64 extent_offset;
1425a52d9a80SChris Mason 	u64 last_byte = i_size_read(inode);
1426a52d9a80SChris Mason 	u64 block_start;
1427a52d9a80SChris Mason 	sector_t sector;
1428a52d9a80SChris Mason 	struct extent_map *em;
1429a52d9a80SChris Mason 	struct block_device *bdev;
1430a52d9a80SChris Mason 	int ret;
1431a52d9a80SChris Mason 	int nr = 0;
1432a52d9a80SChris Mason 	size_t page_offset = 0;
1433a52d9a80SChris Mason 	size_t iosize;
1434a52d9a80SChris Mason 	size_t blocksize;
1435a52d9a80SChris Mason 	loff_t i_size = i_size_read(inode);
1436a52d9a80SChris Mason 	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1437*b888db2bSChris Mason 	u64 nr_delalloc;
1438*b888db2bSChris Mason 	u64 delalloc_end;
1439a52d9a80SChris Mason 
1440*b888db2bSChris Mason 	WARN_ON(!PageLocked(page));
1441a52d9a80SChris Mason 	if (page->index > end_index) {
1442a52d9a80SChris Mason 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1443a52d9a80SChris Mason 		unlock_page(page);
1444a52d9a80SChris Mason 		return 0;
1445a52d9a80SChris Mason 	}
1446a52d9a80SChris Mason 
1447a52d9a80SChris Mason 	if (page->index == end_index) {
1448a52d9a80SChris Mason 		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1449a52d9a80SChris Mason 		zero_user_page(page, offset,
1450a52d9a80SChris Mason 			       PAGE_CACHE_SIZE - offset, KM_USER0);
1451a52d9a80SChris Mason 	}
1452a52d9a80SChris Mason 
1453a52d9a80SChris Mason 	if (!PagePrivate(page)) {
1454a52d9a80SChris Mason 		SetPagePrivate(page);
1455a52d9a80SChris Mason 		set_page_private(page, 1);
1456*b888db2bSChris Mason 		WARN_ON(!page->mapping->a_ops->invalidatepage);
1457a52d9a80SChris Mason 		page_cache_get(page);
1458a52d9a80SChris Mason 	}
1459a52d9a80SChris Mason 
1460a52d9a80SChris Mason 	lock_extent(tree, start, page_end, GFP_NOFS);
1461*b888db2bSChris Mason 	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
1462*b888db2bSChris Mason 					       &delalloc_end,
1463*b888db2bSChris Mason 					       128 * 1024 * 1024);
1464*b888db2bSChris Mason 	if (nr_delalloc) {
1465*b888db2bSChris Mason 		if (tree->fill_delalloc)
			tree->fill_delalloc(inode, start, delalloc_end);
1466*b888db2bSChris Mason 		if (delalloc_end >= page_end + 1) {
1467*b888db2bSChris Mason 			clear_extent_bit(tree, page_end + 1, delalloc_end,
1468*b888db2bSChris Mason 					 EXTENT_LOCKED | EXTENT_DELALLOC,
1469*b888db2bSChris Mason 					 1, 0, GFP_NOFS);
1470*b888db2bSChris Mason 		}
1471*b888db2bSChris Mason 		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
1472*b888db2bSChris Mason 				 0, 0, GFP_NOFS);
1473*b888db2bSChris Mason 		if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1474*b888db2bSChris Mason 			printk(KERN_ERR "found delalloc bits after clear extent_bit\n");
1475*b888db2bSChris Mason 		}
1476*b888db2bSChris Mason 	} else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1477*b888db2bSChris Mason 		printk(KERN_ERR "found delalloc bits after find_delalloc_range returns 0\n");
1478*b888db2bSChris Mason 	}
1479*b888db2bSChris Mason 
1480*b888db2bSChris Mason 	end = page_end;
1481*b888db2bSChris Mason 	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1482*b888db2bSChris Mason 		printk(KERN_ERR "found delalloc bits after lock_extent\n");
1483*b888db2bSChris Mason 	}
1484a52d9a80SChris Mason 
1485a52d9a80SChris Mason 	if (last_byte <= start) {
1486a52d9a80SChris Mason 		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1487a52d9a80SChris Mason 		goto done;
1488a52d9a80SChris Mason 	}
1489a52d9a80SChris Mason 
1490a52d9a80SChris Mason 	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1491a52d9a80SChris Mason 	blocksize = inode->i_sb->s_blocksize;
1492a52d9a80SChris Mason 
1493a52d9a80SChris Mason 	while (cur <= end) {
1494a52d9a80SChris Mason 		if (cur >= last_byte) {
1495a52d9a80SChris Mason 			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1496a52d9a80SChris Mason 			break;
1497a52d9a80SChris Mason 		}
1498*b888db2bSChris Mason 		em = get_extent(inode, page, page_offset, cur, end, 0);
1499a52d9a80SChris Mason 		if (IS_ERR(em) || !em) {
1500a52d9a80SChris Mason 			SetPageError(page);
1501a52d9a80SChris Mason 			break;
1502a52d9a80SChris Mason 		}
1503a52d9a80SChris Mason 
1504a52d9a80SChris Mason 		extent_offset = cur - em->start;
1505a52d9a80SChris Mason 		BUG_ON(em->end < cur);
1506a52d9a80SChris Mason 		BUG_ON(end < cur);
1507a52d9a80SChris Mason 		iosize = min(em->end - cur, end - cur) + 1;
1508a52d9a80SChris Mason 		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1509a52d9a80SChris Mason 		sector = (em->block_start + extent_offset) >> 9;
1510a52d9a80SChris Mason 		bdev = em->bdev;
1511a52d9a80SChris Mason 		block_start = em->block_start;
1512a52d9a80SChris Mason 		free_extent_map(em);
1513a52d9a80SChris Mason 		em = NULL;
1514a52d9a80SChris Mason 
1515a52d9a80SChris Mason 		if (block_start == 0 || block_start == EXTENT_MAP_INLINE) {
1516a52d9a80SChris Mason 			clear_extent_dirty(tree, cur,
1517a52d9a80SChris Mason 					   cur + iosize - 1, GFP_NOFS);
1518a52d9a80SChris Mason 			cur = cur + iosize;
1519a52d9a80SChris Mason 			page_offset += iosize;
1520a52d9a80SChris Mason 			continue;
1521a52d9a80SChris Mason 		}
1522a52d9a80SChris Mason 
1523a52d9a80SChris Mason 		/* leave this out until we have a page_mkwrite call */
1524a52d9a80SChris Mason 		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1525a52d9a80SChris Mason 				   EXTENT_DIRTY, 0)) {
1526a52d9a80SChris Mason 			cur = cur + iosize;
1527a52d9a80SChris Mason 			page_offset += iosize;
1528a52d9a80SChris Mason 			continue;
1529a52d9a80SChris Mason 		}
1530a52d9a80SChris Mason 		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1531a52d9a80SChris Mason 		set_range_writeback(tree, cur, cur + iosize - 1);
1532a52d9a80SChris Mason 		ret = submit_extent_page(WRITE, tree, page,
1533a52d9a80SChris Mason 					 sector, iosize, page_offset, bdev,
1534a52d9a80SChris Mason 					 end_bio_extent_writepage);
1535a52d9a80SChris Mason 		if (ret)
1536a52d9a80SChris Mason 			SetPageError(page);
1537a52d9a80SChris Mason 		cur = cur + iosize;
1538a52d9a80SChris Mason 		page_offset += iosize;
1539a52d9a80SChris Mason 		nr++;
1540a52d9a80SChris Mason 	}
1541a52d9a80SChris Mason done:
1542a52d9a80SChris Mason 	WARN_ON(test_range_bit(tree, start, page_end, EXTENT_DIRTY, 0));
1543a52d9a80SChris Mason 	unlock_extent(tree, start, page_end, GFP_NOFS);
1544a52d9a80SChris Mason 	unlock_page(page);
1545a52d9a80SChris Mason 	return 0;
1546a52d9a80SChris Mason }
1547a52d9a80SChris Mason EXPORT_SYMBOL(extent_write_full_page);
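
/*
 * Hedged usage sketch for the write side, with the same hypothetical
 * myfs_tree() and myfs_get_extent() placeholders as the readpage sketch
 * above.  A filesystem that wants the delalloc pass in
 * extent_write_full_page() to do real work must also set
 * tree->fill_delalloc; the call is skipped when it is unset.
 *
 *	static int myfs_writepage(struct page *page,
 *				  struct writeback_control *wbc)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = myfs_tree(page->mapping->host);
 *		return extent_write_full_page(tree, page, myfs_get_extent,
 *					      wbc);
 *	}
 */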
1548a52d9a80SChris Mason 
1549a52d9a80SChris Mason /*
1550a52d9a80SChris Mason  * basic invalidatepage code, this waits on any locked or writeback
1551a52d9a80SChris Mason  * ranges corresponding to the page, and then deletes any extent state
1552a52d9a80SChris Mason  * records from the tree
1553a52d9a80SChris Mason  */
1554a52d9a80SChris Mason int extent_invalidatepage(struct extent_map_tree *tree,
1555a52d9a80SChris Mason 			  struct page *page, unsigned long offset)
1556a52d9a80SChris Mason {
1557a52d9a80SChris Mason 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1558a52d9a80SChris Mason 	u64 end = start + PAGE_CACHE_SIZE - 1;
1559a52d9a80SChris Mason 	size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1560a52d9a80SChris Mason 
1561a52d9a80SChris Mason 	start += (offset + blocksize - 1) & ~(blocksize - 1);
1562a52d9a80SChris Mason 	if (start > end)
1563a52d9a80SChris Mason 		return 0;
1564a52d9a80SChris Mason 
1565a52d9a80SChris Mason 	lock_extent(tree, start, end, GFP_NOFS);
1566a52d9a80SChris Mason 	wait_on_extent_writeback(tree, start, end);
1567a52d9a80SChris Mason 	clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DIRTY,
1568a52d9a80SChris Mason 			 1, 1, GFP_NOFS);
1569a52d9a80SChris Mason 	return 0;
1570a52d9a80SChris Mason }
1571a52d9a80SChris Mason EXPORT_SYMBOL(extent_invalidatepage);
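
/*
 * Hedged usage sketch (hypothetical myfs_tree() accessor): the
 * invalidatepage hook of this kernel generation returns void and takes
 * only the page and the byte offset, so the glue is a straight
 * pass-through.
 *
 *	static void myfs_invalidatepage(struct page *page,
 *					unsigned long offset)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = myfs_tree(page->mapping->host);
 *		extent_invalidatepage(tree, page, offset);
 *	}
 */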
1572a52d9a80SChris Mason 
1573a52d9a80SChris Mason /*
1574a52d9a80SChris Mason  * simple commit_write call, set_range_dirty is used to mark both
1575a52d9a80SChris Mason  * the pages and the extent records as dirty
1576a52d9a80SChris Mason  */
1577a52d9a80SChris Mason int extent_commit_write(struct extent_map_tree *tree,
1578a52d9a80SChris Mason 			struct inode *inode, struct page *page,
1579a52d9a80SChris Mason 			unsigned from, unsigned to)
1580a52d9a80SChris Mason {
1581a52d9a80SChris Mason 	loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1582a52d9a80SChris Mason 
1583a52d9a80SChris Mason 	if (!PagePrivate(page)) {
1584a52d9a80SChris Mason 		SetPagePrivate(page);
1585a52d9a80SChris Mason 		set_page_private(page, 1);
1586*b888db2bSChris Mason 		WARN_ON(!page->mapping->a_ops->invalidatepage);
1587a52d9a80SChris Mason 		page_cache_get(page);
1588a52d9a80SChris Mason 	}
1589a52d9a80SChris Mason 
1590a52d9a80SChris Mason 	set_page_dirty(page);
1591a52d9a80SChris Mason 
1592a52d9a80SChris Mason 	if (pos > inode->i_size) {
1593a52d9a80SChris Mason 		i_size_write(inode, pos);
1594a52d9a80SChris Mason 		mark_inode_dirty(inode);
1595a52d9a80SChris Mason 	}
1596a52d9a80SChris Mason 	return 0;
1597a52d9a80SChris Mason }
1598a52d9a80SChris Mason EXPORT_SYMBOL(extent_commit_write);
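
/*
 * Hedged usage sketch (myfs_tree() is hypothetical): the commit_write
 * address_space hook of this era takes (file, page, from, to), so only
 * the tree and inode need to be looked up.
 *
 *	static int myfs_commit_write(struct file *file, struct page *page,
 *				     unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return extent_commit_write(myfs_tree(inode), inode, page,
 *					   from, to);
 *	}
 */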
1599a52d9a80SChris Mason 
1600a52d9a80SChris Mason int extent_prepare_write(struct extent_map_tree *tree,
1601a52d9a80SChris Mason 			 struct inode *inode, struct page *page,
1602a52d9a80SChris Mason 			 unsigned from, unsigned to, get_extent_t *get_extent)
1603a52d9a80SChris Mason {
1604a52d9a80SChris Mason 	u64 page_start = ((u64)page->index << PAGE_CACHE_SHIFT);
1605a52d9a80SChris Mason 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1606a52d9a80SChris Mason 	u64 block_start;
1607a52d9a80SChris Mason 	u64 orig_block_start;
1608a52d9a80SChris Mason 	u64 block_end;
1609a52d9a80SChris Mason 	u64 cur_end;
1610a52d9a80SChris Mason 	struct extent_map *em;
1611a52d9a80SChris Mason 	unsigned blocksize = 1 << inode->i_blkbits;
1612a52d9a80SChris Mason 	size_t page_offset = 0;
1613a52d9a80SChris Mason 	size_t block_off_start;
1614a52d9a80SChris Mason 	size_t block_off_end;
1615a52d9a80SChris Mason 	int err = 0;
1616a52d9a80SChris Mason 	int iocount = 0;
1617a52d9a80SChris Mason 	int ret = 0;
1618a52d9a80SChris Mason 	int isnew;
1619a52d9a80SChris Mason 
1620a52d9a80SChris Mason 	if (!PagePrivate(page)) {
1621a52d9a80SChris Mason 		SetPagePrivate(page);
1622a52d9a80SChris Mason 		set_page_private(page, 1);
1623*b888db2bSChris Mason 		WARN_ON(!page->mapping->a_ops->invalidatepage);
1624a52d9a80SChris Mason 		page_cache_get(page);
1625a52d9a80SChris Mason 	}
1626a52d9a80SChris Mason 	block_start = (page_start + from) & ~((u64)blocksize - 1);
1627a52d9a80SChris Mason 	block_end = (page_start + to - 1) | (blocksize - 1);
1628a52d9a80SChris Mason 	orig_block_start = block_start;
1629a52d9a80SChris Mason 
1630a52d9a80SChris Mason 	lock_extent(tree, page_start, page_end, GFP_NOFS);
1631a52d9a80SChris Mason 	while(block_start <= block_end) {
1632a52d9a80SChris Mason 		em = get_extent(inode, page, page_offset, block_start,
1633a52d9a80SChris Mason 				block_end, 1);
1634a52d9a80SChris Mason 		if (IS_ERR(em) || !em) {
			err = em ? PTR_ERR(em) : -EIO;
1635a52d9a80SChris Mason 			goto err;
1636a52d9a80SChris Mason 		}
1637a52d9a80SChris Mason 		cur_end = min(block_end, em->end);
1638a52d9a80SChris Mason 		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
1639a52d9a80SChris Mason 		block_off_end = block_off_start + blocksize;
1640a52d9a80SChris Mason 		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
1641a52d9a80SChris Mason 
1642a52d9a80SChris Mason 		if (!PageUptodate(page) && isnew &&
1643a52d9a80SChris Mason 		    (block_off_end > to || block_off_start < from)) {
1644a52d9a80SChris Mason 			void *kaddr;
1645a52d9a80SChris Mason 
1646a52d9a80SChris Mason 			kaddr = kmap_atomic(page, KM_USER0);
1647a52d9a80SChris Mason 			if (block_off_end > to)
1648a52d9a80SChris Mason 				memset(kaddr + to, 0, block_off_end - to);
1649a52d9a80SChris Mason 			if (block_off_start < from)
1650a52d9a80SChris Mason 				memset(kaddr + block_off_start, 0,
1651a52d9a80SChris Mason 				       from - block_off_start);
1652a52d9a80SChris Mason 			flush_dcache_page(page);
1653a52d9a80SChris Mason 			kunmap_atomic(kaddr, KM_USER0);
1654a52d9a80SChris Mason 		}
1655a52d9a80SChris Mason 		if (!isnew && !PageUptodate(page) &&
1656a52d9a80SChris Mason 		    (block_off_end > to || block_off_start < from) &&
1657a52d9a80SChris Mason 		    !test_range_bit(tree, block_start, cur_end,
1658a52d9a80SChris Mason 				    EXTENT_UPTODATE, 1)) {
1659a52d9a80SChris Mason 			u64 sector;
1660a52d9a80SChris Mason 			u64 extent_offset = block_start - em->start;
1661a52d9a80SChris Mason 			size_t iosize;
1662a52d9a80SChris Mason 			sector = (em->block_start + extent_offset) >> 9;
1663a52d9a80SChris Mason 			iosize = (cur_end - block_start + blocksize - 1) &
1664a52d9a80SChris Mason 				~((u64)blocksize - 1);
1665a52d9a80SChris Mason 			/*
1666a52d9a80SChris Mason 			 * we've already got the extent locked, but we
1667a52d9a80SChris Mason 			 * need to split the state such that our end_bio
1668a52d9a80SChris Mason 			 * handler can clear the lock.
1669a52d9a80SChris Mason 			 */
1670a52d9a80SChris Mason 			set_extent_bit(tree, block_start,
1671a52d9a80SChris Mason 				       block_start + iosize - 1,
1672a52d9a80SChris Mason 				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
1673a52d9a80SChris Mason 			ret = submit_extent_page(READ, tree, page,
1674a52d9a80SChris Mason 					 sector, iosize, page_offset, em->bdev,
1675a52d9a80SChris Mason 					 end_bio_extent_preparewrite);
1676a52d9a80SChris Mason 			iocount++;
1677a52d9a80SChris Mason 			block_start = block_start + iosize;
1678a52d9a80SChris Mason 		} else {
1679a52d9a80SChris Mason 			set_extent_uptodate(tree, block_start, cur_end,
1680a52d9a80SChris Mason 					    GFP_NOFS);
1681a52d9a80SChris Mason 			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
1682a52d9a80SChris Mason 			block_start = cur_end + 1;
1683a52d9a80SChris Mason 		}
1684a52d9a80SChris Mason 		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
1685a52d9a80SChris Mason 		free_extent_map(em);
1686a52d9a80SChris Mason 	}
1687a52d9a80SChris Mason 	if (iocount) {
1688a52d9a80SChris Mason 		wait_extent_bit(tree, orig_block_start,
1689a52d9a80SChris Mason 				block_end, EXTENT_LOCKED);
1690a52d9a80SChris Mason 	}
1691a52d9a80SChris Mason 	check_page_uptodate(tree, page);
1692a52d9a80SChris Mason err:
1693a52d9a80SChris Mason 	/* FIXME, zero out newly allocated blocks on error */
1694a52d9a80SChris Mason 	return err;
1695a52d9a80SChris Mason }
1696a52d9a80SChris Mason EXPORT_SYMBOL(extent_prepare_write);
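
/*
 * Hedged usage sketch mirroring the commit_write glue above; myfs_tree()
 * and myfs_get_extent() remain hypothetical placeholders.
 *
 *	static int myfs_prepare_write(struct file *file, struct page *page,
 *				      unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *
 *		return extent_prepare_write(myfs_tree(inode), inode, page,
 *					    from, to, myfs_get_extent);
 *	}
 */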
1697a52d9a80SChris Mason 
1698a52d9a80SChris Mason /*
1699a52d9a80SChris Mason  * a helper for releasepage.  As long as there are no locked extents
1700a52d9a80SChris Mason  * in the range corresponding to the page, both state records and extent
1701a52d9a80SChris Mason  * map records are removed
1702a52d9a80SChris Mason  */
1703a52d9a80SChris Mason int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
1704a52d9a80SChris Mason {
1705a52d9a80SChris Mason 	struct extent_map *em;
1706a52d9a80SChris Mason 	u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
1707a52d9a80SChris Mason 	u64 end = start + PAGE_CACHE_SIZE - 1;
1708a52d9a80SChris Mason 	u64 orig_start = start;
1709*b888db2bSChris Mason 	int ret = 1;
1710a52d9a80SChris Mason 
1711a52d9a80SChris Mason 	while (start <= end) {
1712a52d9a80SChris Mason 		em = lookup_extent_mapping(tree, start, end);
1713a52d9a80SChris Mason 		if (!em || IS_ERR(em))
1714a52d9a80SChris Mason 			break;
1715*b888db2bSChris Mason 		if (!test_range_bit(tree, em->start, em->end,
1716a52d9a80SChris Mason 				    EXTENT_LOCKED, 0)) {
1717a52d9a80SChris Mason 			remove_extent_mapping(tree, em);
1718a52d9a80SChris Mason 			/* once for the rb tree */
1719a52d9a80SChris Mason 			free_extent_map(em);
1720*b888db2bSChris Mason 		}
1721*b888db2bSChris Mason 		start = em->end + 1;
1722a52d9a80SChris Mason 		/* once for us */
1723a52d9a80SChris Mason 		free_extent_map(em);
1724a52d9a80SChris Mason 	}
1725*b888db2bSChris Mason 	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
1726*b888db2bSChris Mason 		ret = 0;
1727*b888db2bSChris Mason 	else
1728a52d9a80SChris Mason 		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
1729a52d9a80SChris Mason 				 1, 1, GFP_NOFS);
1730*b888db2bSChris Mason 	return ret;
1731a52d9a80SChris Mason }
1732a52d9a80SChris Mason EXPORT_SYMBOL(try_release_extent_mapping);
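
/*
 * Hedged usage sketch (hypothetical myfs_tree()): releasepage must
 * return nonzero only when the page's metadata can really be dropped,
 * which is exactly what try_release_extent_mapping() reports.
 *
 *	static int myfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct extent_map_tree *tree;
 *
 *		tree = myfs_tree(page->mapping->host);
 *		return try_release_extent_mapping(tree, page);
 *	}
 */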
1733a52d9a80SChris Mason 
1734