/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"


/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
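
/*
 * Editor's illustrative sketch (not part of the original file): the loop in
 * btrfs_copy_from_user() above walks the user buffer in page-sized chunks,
 * where only the first chunk may start at a non-zero in-page offset.  The
 * hypothetical user-space program below reproduces just that chunking
 * arithmetic, assuming 4096-byte pages; all names here are made up.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define EX_PAGE_SIZE 4096UL

static void show_copy_chunks(unsigned long pos, long write_bytes)
{
	size_t offset = pos & (EX_PAGE_SIZE - 1);	/* offset in first page */
	int i;

	for (i = 0; write_bytes > 0; i++, offset = 0) {
		size_t count = EX_PAGE_SIZE - offset;

		if ((long)count > write_bytes)
			count = write_bytes;
		printf("page %d: copy %zu bytes at in-page offset %zu\n",
		       i, count, offset);
		write_bytes -= count;
	}
}

int main(void)
{
	/* a 10000-byte write starting 100 bytes into the second page */
	show_copy_chunks(4096 + 100, 10000);
	return 0;
}
#endif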

/*
 * unlocks pages after btrfs_file_write is done with them
 */
static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		if (!pages[i])
			break;
		/*
		 * PageChecked is some magic around finding pages that have
		 * been modified without going through btrfs_set_page_dirty;
		 * clear it here.
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
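
/*
 * Editor's illustrative sketch (not part of the original file):
 * btrfs_drop_pages() relies on the array being NULL at the first slot that
 * was never prepared, so cleanup can stop at the first NULL.  A hypothetical
 * user-space rendition of the same "release until first NULL" idiom:
 */
#if 0
#include <stdlib.h>

static void drop_all(void **slots, size_t num_slots)
{
	size_t i;

	for (i = 0; i < num_slots; i++) {
		if (!slots[i])
			break;		/* nothing was prepared past here */
		free(slots[i]);
		slots[i] = NULL;
	}
}

int main(void)
{
	void *slots[4] = { malloc(8), malloc(8), NULL, NULL };

	drop_all(slots, 4);	/* frees the two live entries, stops at NULL */
	return 0;
}
#endif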

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	trans = btrfs_join_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	hint_byte = 0;

	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/*
	 * check for reserved extents on each page; we don't want to reset
	 * the delalloc bit on things that already have extents reserved.
	 */
	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
	err = btrfs_end_transaction(trans, root);
out_unlock:
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}
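
/*
 * Editor's illustrative sketch (not part of the original file): the
 * start_pos/num_bytes math in dirty_and_release_pages() rounds the write
 * range out to sector boundaries (round pos down, round the end up).  A
 * hypothetical user-space demo, assuming a power-of-two 4096-byte sector:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t sectorsize = 4096;
	uint64_t pos = 5000, write_bytes = 3000;
	uint64_t start_pos = pos & ~(sectorsize - 1);
	uint64_t num_bytes = (write_bytes + pos - start_pos +
			      sectorsize - 1) & ~(sectorsize - 1);

	/* prints start_pos=4096 num_bytes=4096: bytes 5000..7999 fit in
	 * the single sector [4096, 8192) */
	printf("start_pos=%llu num_bytes=%llu\n",
	       (unsigned long long)start_pos,
	       (unsigned long long)num_bytes);
	return 0;
}
#endif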

/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map(GFP_NOFS);
		if (!split2)
			split2 = alloc_extent_map(GFP_NOFS);

		spin_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			spin_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			spin_unlock(&em_tree->lock);
			if (em->start <= start &&
			    (!testend || em->start + em->len >= start + len)) {
				free_extent_map(em);
				break;
			}
			if (start < em->start) {
				len = em->start - start;
			} else {
				len = start + len - (em->start + em->len);
				start = em->start + em->len;
			}
			free_extent_map(em);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret);
			free_extent_map(split);
			split = NULL;
		}
		spin_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
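
/*
 * Editor's illustrative sketch (not part of the original file): when
 * btrfs_drop_extent_cache() drops [start, start + len) out of the middle of
 * a cached mapping, it emits up to two leftover pieces, and for uncompressed
 * extents the back piece's block_start is advanced by the distance from the
 * original extent start ("diff" above).  Hypothetical user-space arithmetic
 * only; all names are made up:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* cached extent: file range [0, 16384) backed at disk byte 1048576 */
	uint64_t em_start = 0, em_len = 16384, em_block_start = 1048576;
	uint64_t start = 4096, len = 4096;	/* range being dropped */

	/* front piece keeps its original disk start */
	uint64_t front_start = em_start;
	uint64_t front_len = start - em_start;

	/* back piece starts after the dropped range, disk start shifted */
	uint64_t diff = start + len - em_start;
	uint64_t back_start = start + len;
	uint64_t back_len = em_start + em_len - back_start;
	uint64_t back_block_start = em_block_start + diff;

	printf("front: [%llu,+%llu) at disk %llu\n",
	       (unsigned long long)front_start,
	       (unsigned long long)front_len,
	       (unsigned long long)em_block_start);
	printf("back:  [%llu,+%llu) at disk %llu\n",
	       (unsigned long long)back_start,
	       (unsigned long long)back_len,
	       (unsigned long long)back_block_start);
	return 0;
}
#endif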

/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_byte is filled in with a byte number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit is used to tell this code which offsets in the file to keep
 * if they contain inline extents.
 */
noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 locked_end,
		       u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	u64 leaf_start;
	u64 ram_bytes = 0;
	u64 orig_parent = 0;
	u64 disk_bytenr = 0;
	u64 orig_locked_end = locked_end;
	u8 compression;
	u8 encryption;
	u16 other_encoding = 0;
	u64 root_gen;
	u64 root_owner;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type = 0;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	inline_limit = 0;
	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		leaf_start = 0;
		root_gen = 0;
		root_owner = 0;
		compression = 0;
		encryption = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
		    key.offset >= end) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != inode->i_ino) {
			goto out;
		}
		if (recow) {
			search_start = max(key.offset, start);
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			compression = btrfs_file_extent_compression(leaf,
								    extent);
			encryption = btrfs_file_extent_encryption(leaf,
								  extent);
			other_encoding = btrfs_file_extent_other_encoding(leaf,
								  extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				ram_bytes = btrfs_file_extent_ram_bytes(leaf,
								extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, extent);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		if (end <= extent_end && start >= key.offset && found_inline)
			*hint_byte = EXTENT_MAP_INLINE;

		if (found_extent) {
			read_extent_buffer(leaf, &old, (unsigned long)extent,
					   sizeof(old));
			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			leaf_start = leaf->start;
		}

		if (end < extent_end && end >= key.offset) {
			bookend = 1;
			if (found_inline && start <= key.offset)
				keep = 1;
		}

		if (bookend && found_extent) {
			if (locked_end < extent_end) {
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
				if (!ret) {
					btrfs_release_path(root, path);
					lock_extent(&BTRFS_I(inode)->io_tree,
						locked_end, extent_end - 1,
						GFP_NOFS);
					locked_end = extent_end;
					continue;
				}
				locked_end = extent_end;
			}
			orig_parent = path->nodes[0]->start;
			disk_bytenr = le64_to_cpu(old.disk_bytenr);
			if (disk_bytenr != 0) {
				ret = btrfs_inc_extent_ref(trans, root,
					   disk_bytenr,
					   le64_to_cpu(old.disk_num_bytes),
					   orig_parent, root->root_key.objectid,
					   trans->transid, inode->i_ino);
				BUG_ON(ret);
			}
		}

		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;

		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode_sub_bytes(inode, old_num -
							new_num);
				}
				btrfs_set_file_extent_num_bytes(leaf,
							extent, new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				inode_sub_bytes(inode, extent_end -
						inline_limit);
				btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
				if (!compression && !encryption) {
					btrfs_truncate_item(trans, root, path,
							    new_size, 1);
				}
			}
		}
		/* delete the entire extent */
		if (!keep) {
			if (found_inline)
				inode_sub_bytes(inode, extent_end -
						key.offset);
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			extent = NULL;
			btrfs_release_path(root, path);
			/* the extent will be freed later */
		}
		if (bookend && found_inline && start <= key.offset) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - end);
			inode_sub_bytes(inode, end - key.offset);
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							new_size);
			if (!compression && !encryption)
				ret = btrfs_truncate_item(trans, root, path,
							  new_size, 0);
			BUG_ON(ret);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);

			btrfs_release_path(root, path);
			path->leave_spinning = 1;
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));
			BUG_ON(ret);

			leaf = path->nodes[0];
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			btrfs_set_file_extent_compression(leaf, extent,
							  compression);
			btrfs_set_file_extent_encryption(leaf, extent,
							 encryption);
			btrfs_set_file_extent_other_encoding(leaf, extent,
							     other_encoding);
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);

			/*
			 * set the ram bytes to the size of the full extent
			 * before splitting.  This is a worst case flag,
			 * but it's the best we can do because we don't know
			 * how splitting affects compression
			 */
			btrfs_set_file_extent_ram_bytes(leaf, extent,
							ram_bytes);
			btrfs_set_file_extent_type(leaf, extent, found_type);

			btrfs_unlock_up_safe(path, 1);
			btrfs_mark_buffer_dirty(path->nodes[0]);
			btrfs_set_lock_blocking(path->nodes[0]);

			if (disk_bytenr != 0) {
				ret = btrfs_update_extent_ref(trans, root,
						disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						orig_parent,
						leaf->start,
						root->root_key.objectid,
						trans->transid, ins.objectid);

				BUG_ON(ret);
			}
			path->leave_spinning = 0;
			btrfs_release_path(root, path);
			if (disk_bytenr != 0)
				inode_add_bytes(inode, extent_end - end);
		}

		if (found_extent && !keep) {
			u64 old_disk_bytenr = le64_to_cpu(old.disk_bytenr);

			if (old_disk_bytenr != 0) {
				inode_sub_bytes(inode,
						le64_to_cpu(old.num_bytes));
				ret = btrfs_free_extent(trans, root,
						old_disk_bytenr,
						le64_to_cpu(old.disk_num_bytes),
						leaf_start, root_owner,
						root_gen, key.objectid, 0);
				BUG_ON(ret);
				*hint_byte = old_disk_bytenr;
			}
		}

		if (search_start >= end) {
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	if (locked_end > orig_locked_end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, orig_locked_end,
			      locked_end - 1, GFP_NOFS);
	}
	return ret;
}
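
/*
 * Editor's illustrative sketch (not part of the original file): the heart of
 * btrfs_drop_extents() is classifying how an existing extent item
 * [key_offset, extent_end) overlaps the drop range [start, end): keep a
 * truncated front piece, create a "bookend" tail piece, or delete the item
 * outright.  A hypothetical, simplified user-space classifier of just those
 * decisions (the inline-extent special cases are ignored):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void classify(uint64_t key_offset, uint64_t extent_end,
		     uint64_t start, uint64_t end)
{
	int keep = 0;
	int bookend = 0;

	if (end < extent_end && end >= key_offset)
		bookend = 1;	/* tail [end, extent_end) survives */
	if (start > key_offset)
		keep = 1;	/* front [key_offset, start) survives */

	printf("[%llu,%llu) vs drop [%llu,%llu): %s%s%s\n",
	       (unsigned long long)key_offset,
	       (unsigned long long)extent_end,
	       (unsigned long long)start, (unsigned long long)end,
	       keep ? "truncate-front " : "",
	       bookend ? "bookend-tail " : "",
	       keep ? "" : "delete-item");
}

int main(void)
{
	classify(0, 8192, 4096, 12288);	/* truncate front only */
	classify(0, 16384, 4096, 8192);	/* truncate front + bookend tail */
	classify(4096, 8192, 0, 16384);	/* fully covered: delete */
	return 0;
}
#endif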

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
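
/*
 * Editor's illustrative sketch (not part of the original file):
 * extent_mergeable() treats *start/*end as optional constraints — zero means
 * "don't care", non-zero must match the neighbour's boundaries — and fills
 * both in on success.  A hypothetical user-space rendition of that in/out
 * convention:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int boundaries_match(uint64_t key_offset, uint64_t extent_end,
			    uint64_t *start, uint64_t *end)
{
	if ((*start && *start != key_offset) || (*end && *end != extent_end))
		return 0;

	*start = key_offset;	/* report the neighbour's actual range */
	*end = extent_end;
	return 1;
}

int main(void)
{
	uint64_t s = 8192, e = 0;	/* require start == 8192, any end */

	if (boundaries_match(8192, 12288, &s, &e))
		printf("mergeable neighbour spans [%llu, %llu)\n",
		       (unsigned long long)s, (unsigned long long)e);
	return 0;
}
#endif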

/*
 * Mark the extent in the range start - end as written.
 *
 * This changes the extent type from 'pre-allocated' to 'regular'.  If only
 * part of the extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct inode *inode, u64 start, u64 end)
{
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 extent_offset;
	u64 other_start;
	u64 other_end;
	u64 split = start;
	u64 locked_end = end;
	u64 orig_parent;
	int extent_type;
	int split_end = 1;
	int ret;

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	BUG_ON(!path);
again:
	key.objectid = inode->i_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (split == start)
		key.offset = split;
	else
		key.offset = split - 1;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != inode->i_ino ||
	       key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(leaf, fi);
	BUG_ON(extent_type != BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	extent_offset = btrfs_file_extent_offset(leaf, fi);

	if (key.offset == start)
		split = end;

	if (key.offset == start && extent_end == end) {
		int del_nr = 0;
		int del_slot = 0;
		u64 leaf_owner = btrfs_header_owner(leaf);
		u64 leaf_gen = btrfs_header_generation(leaf);
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			extent_end = other_end;
			del_slot = path->slots[0] + 1;
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			key.offset = other_start;
			del_slot = path->slots[0];
			del_nr++;
			ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
						leaf->start, leaf_owner,
						leaf_gen, inode->i_ino, 0);
			BUG_ON(ret);
		}
		split_end = 0;
		if (del_nr == 0) {
			btrfs_set_file_extent_type(leaf, fi,
						   BTRFS_FILE_EXTENT_REG);
			goto done;
		}

		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		BUG_ON(ret);
		goto release;
	} else if (split == start) {
		if (locked_end < extent_end) {
			ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
			if (!ret) {
				btrfs_release_path(root, path);
				lock_extent(&BTRFS_I(inode)->io_tree,
					locked_end, extent_end - 1, GFP_NOFS);
				locked_end = extent_end;
				goto again;
			}
			locked_end = extent_end;
		}
		btrfs_set_file_extent_num_bytes(leaf, fi, split - key.offset);
		extent_offset += split - key.offset;
	} else {
		BUG_ON(key.offset != start);
		btrfs_set_file_extent_offset(leaf, fi, extent_offset +
					     split - key.offset);
		btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - split);
		key.offset = split;
		btrfs_set_item_key_safe(trans, root, path, &key);
		extent_end = split;
	}

	if (extent_end == end) {
		split_end = 0;
		extent_type = BTRFS_FILE_EXTENT_REG;
	}
	if (extent_end == end && split == start) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]++;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			key.offset = split;
			btrfs_set_item_key_safe(trans, root, path, &key);
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - split);
			goto done;
		}
	}
	if (extent_end == end && split == end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
				     bytenr, &other_start, &other_end)) {
			path->slots[0]--;
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi, extent_end -
							other_start);
			goto done;
		}
	}

	btrfs_mark_buffer_dirty(leaf);

	orig_parent = leaf->start;
	ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
				   orig_parent, root->root_key.objectid,
				   trans->transid, inode->i_ino);
	BUG_ON(ret);
	btrfs_release_path(root, path);

	key.offset = start;
	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*fi));
	BUG_ON(ret);

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, extent_offset);
	btrfs_set_file_extent_num_bytes(leaf, fi, extent_end - key.offset);
	btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_compression(leaf, fi, 0);
	btrfs_set_file_extent_encryption(leaf, fi, 0);
	btrfs_set_file_extent_other_encoding(leaf, fi, 0);

	if (orig_parent != leaf->start) {
		ret = btrfs_update_extent_ref(trans, root, bytenr, num_bytes,
					      orig_parent, leaf->start,
					      root->root_key.objectid,
					      trans->transid, inode->i_ino);
		BUG_ON(ret);
	}
done:
	btrfs_mark_buffer_dirty(leaf);

release:
	btrfs_release_path(root, path);
	if (split_end && split == start) {
		split = end;
		goto again;
	}
	if (locked_end > end) {
		unlock_extent(&BTRFS_I(inode)->io_tree, end, locked_end - 1,
			      GFP_NOFS);
	}
	btrfs_free_path(path);
	return 0;
}
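
/*
 * Editor's illustrative sketch (not part of the original file): ignoring the
 * merge-with-neighbour fast paths, btrfs_mark_extent_written() turns a
 * prealloc extent [key_offset, extent_end) with a written sub-range
 * [start, end) into two or three pieces, with only [start, end) becoming a
 * regular extent.  Hypothetical user-space arithmetic for the resulting
 * layout:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void show_pieces(uint64_t key_offset, uint64_t extent_end,
			uint64_t start, uint64_t end)
{
	if (start > key_offset)
		printf("prealloc [%llu, %llu)\n",
		       (unsigned long long)key_offset,
		       (unsigned long long)start);
	printf("regular  [%llu, %llu)\n",
	       (unsigned long long)start, (unsigned long long)end);
	if (end < extent_end)
		printf("prealloc [%llu, %llu)\n",
		       (unsigned long long)end,
		       (unsigned long long)extent_end);
}

int main(void)
{
	show_pieces(0, 16384, 4096, 8192);	/* middle write: 3 pieces */
	show_pieces(0, 16384, 0, 8192);		/* head write:   2 pieces */
	show_pieces(0, 16384, 0, 16384);	/* full write:   1 piece  */
	return 0;
}
#endif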

/*
 * this gets pages into the page cache and locks them down; it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 unsigned long last_index, size_t write_bytes)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	int err = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

	if (start_pos > inode->i_size) {
		err = btrfs_cont_expand(inode, start_pos);
		if (err)
			return err;
	}

	memset(pages, 0, num_pages * sizeof(struct page *));
again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = grab_cache_page(inode->i_mapping, index + i);
		if (!pages[i]) {
			err = -ENOMEM;
			BUG_ON(1);
		}
		wait_on_page_writeback(pages[i]);
	}
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent(&BTRFS_I(inode)->io_tree,
			    start_pos, last_pos - 1, GFP_NOFS);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(&BTRFS_I(inode)->io_tree,
				      start_pos, last_pos - 1, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
				  GFP_NOFS);
		unlock_extent(&BTRFS_I(inode)->io_tree,
			      start_pos, last_pos - 1, GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		clear_page_dirty_for_io(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
}
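
/*
 * Editor's illustrative sketch (not part of the original file): the ordered
 * extent test in prepare_pages() is a half-open interval overlap check —
 * [file_offset, file_offset + len) intersects [start_pos, last_pos) exactly
 * when each range starts before the other ends.  Hypothetical user-space
 * rendition of that predicate:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int ranges_overlap(uint64_t a_start, uint64_t a_len,
			  uint64_t b_start, uint64_t b_end)
{
	return a_start + a_len > b_start && a_start < b_end;
}

int main(void)
{
	/* ordered extent [8192, 12288) vs pages [4096, 16384): overlaps */
	printf("%d\n", ranges_overlap(8192, 4096, 4096, 16384));
	/* ordered extent [0, 4096) vs pages [4096, 16384): no overlap */
	printf("%d\n", ranges_overlap(0, 4096, 4096, 16384));
	return 0;
}
#endif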

static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	loff_t pos;
	loff_t start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	int ret = 0;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	int nrptrs;
	struct page *pinned[2];
	unsigned long first_index;
	unsigned long last_index;
	int will_write;

	will_write = ((file->f_flags & O_SYNC) || IS_SYNC(inode) ||
		      (file->f_flags & O_DIRECT));

	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
	pinned[0] = NULL;
	pinned[1] = NULL;

	pos = *ppos;
	start_pos = pos;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out_nolock;
	if (count == 0)
		goto out_nolock;

	err = file_remove_suid(file);
	if (err)
		goto out_nolock;
	file_update_time(file);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto out_nolock;
	}

	mutex_lock(&inode->i_mutex);
	BTRFS_I(inode)->sequence++;
	first_index = pos >> PAGE_CACHE_SHIFT;
	last_index = (pos + count) >> PAGE_CACHE_SHIFT;

	/*
	 * there are lots of better ways to do this, but this code
	 * makes sure the first and last page in the file range are
	 * up to date and ready for cow
	 */
	if ((pos & (PAGE_CACHE_SIZE - 1))) {
		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
		if (!PageUptodate(pinned[0])) {
			ret = btrfs_readpage(NULL, pinned[0]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[0]);
		} else {
			unlock_page(pinned[0]);
		}
	}
	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
		if (!PageUptodate(pinned[1])) {
			ret = btrfs_readpage(NULL, pinned[1]);
			BUG_ON(ret);
			wait_on_page_locked(pinned[1]);
		} else {
			unlock_page(pinned[1]);
		}
	}

	while (count > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(count, nrptrs *
					(size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
					PAGE_CACHE_SHIFT;

		WARN_ON(num_pages > nrptrs);
		memset(pages, 0, sizeof(struct page *) * nrptrs);

		ret = btrfs_check_data_free_space(root, inode, write_bytes);
		if (ret)
			goto out;

		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, last_index,
				    write_bytes);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		ret = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, buf);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			btrfs_drop_pages(pages, num_pages);
			goto out;
		}

		ret = dirty_and_release_pages(NULL, root, file, pages,
					      num_pages, pos, write_bytes);
		btrfs_drop_pages(pages, num_pages);
		if (ret) {
			btrfs_free_reserved_data_space(root, inode,
						       write_bytes);
			goto out;
		}

		if (will_write) {
			btrfs_fdatawrite_range(inode->i_mapping, pos,
					       pos + write_bytes - 1,
					       WB_SYNC_ALL);
		} else {
			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
							   num_pages);
			if (num_pages <
			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
				btrfs_btree_balance_dirty(root, 1);
			btrfs_throttle(root);
		}

		buf += write_bytes;
		count -= write_bytes;
		pos += write_bytes;
		num_written += write_bytes;

		cond_resched();
	}
out:
	mutex_unlock(&inode->i_mutex);
	if (ret)
		err = ret;

out_nolock:
	kfree(pages);
	if (pinned[0])
		page_cache_release(pinned[0]);
	if (pinned[1])
		page_cache_release(pinned[1]);
	*ppos = pos;

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;

	if (num_written > 0 && will_write) {
		struct btrfs_trans_handle *trans;

		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
		if (err)
			num_written = err;

		if ((file->f_flags & O_SYNC) || IS_SYNC(inode)) {
			trans = btrfs_start_transaction(root, 1);
			ret = btrfs_log_dentry_safe(trans, root,
						    file->f_dentry);
			if (ret == 0) {
				ret = btrfs_sync_log(trans, root);
				if (ret == 0)
					btrfs_end_transaction(trans, root);
				else
					btrfs_commit_transaction(trans, root);
			} else {
				btrfs_commit_transaction(trans, root);
			}
		}
		if (file->f_flags & O_DIRECT) {
			invalidate_mapping_pages(inode->i_mapping,
			      start_pos >> PAGE_CACHE_SHIFT,
			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
		}
	}
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
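
/*
 * Editor's illustrative sketch (not part of the original file): the write
 * loop above caps each pass at nrptrs page pointers (itself capped so the
 * pointer array fits in one page), then derives how many pages the current
 * chunk touches.  Hypothetical user-space demo of that sizing, assuming
 * 4096-byte pages and 8-byte pointers, mirroring the kernel's formulas:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096UL

int main(void)
{
	size_t count = 1000000, pos = 100;
	size_t nrptrs = (count + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;
	size_t max_ptrs = EX_PAGE_SIZE / sizeof(void *);	/* 512 */
	size_t offset, write_bytes, num_pages;

	if (nrptrs > max_ptrs)
		nrptrs = max_ptrs;

	offset = pos & (EX_PAGE_SIZE - 1);
	write_bytes = nrptrs * EX_PAGE_SIZE - offset;
	if (write_bytes > count)
		write_bytes = count;
	num_pages = (write_bytes + EX_PAGE_SIZE - 1) / EX_PAGE_SIZE;

	printf("first pass: %zu bytes across %zu pages\n",
	       write_bytes, num_pages);
	return 0;
}
#endif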

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (BTRFS_I(inode)->ordered_data_close) {
		BTRFS_I(inode)->ordered_data_close = 0;
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
{
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	/*
	 * check the transaction that last modified this inode
	 * and see if it's already been committed
	 */
	if (!BTRFS_I(inode)->last_trans)
		goto out;

	mutex_lock(&root->fs_info->trans_mutex);
	if (BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&root->fs_info->trans_mutex);
		goto out;
	}
	mutex_unlock(&root->fs_info->trans_mutex);

	root->log_batch++;
	filemap_fdatawrite(inode->i_mapping);
	btrfs_wait_ordered_range(inode, 0, (u64)-1);
	root->log_batch++;

	/*
	 * OK, we haven't committed the transaction yet; let's do a commit
	 */
	if (file && file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0)
		goto out;

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&dentry->d_inode->i_mutex);

	if (ret > 0) {
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_sync_log(trans, root);
		if (ret == 0)
			ret = btrfs_end_transaction(trans, root);
		else
			ret = btrfs_commit_transaction(trans, root);
	}
	mutex_lock(&dentry->d_inode->i_mutex);
out:
	return ret > 0 ? EIO : ret;
}
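
/*
 * Editor's illustrative sketch (not part of the original file): the early
 * return in btrfs_sync_file() is a transid comparison — if the transaction
 * that last touched the inode has already committed, there is nothing to
 * sync.  Hypothetical user-space rendition of that check:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static int fsync_can_skip(uint64_t last_trans, uint64_t last_committed)
{
	if (!last_trans)
		return 1;	/* inode never modified in a transaction */
	return last_trans <= last_committed;
}

int main(void)
{
	printf("%d\n", fsync_can_skip(5, 7));	/* 1: already committed */
	printf("%d\n", fsync_can_skip(8, 7));	/* 0: must log or commit */
	return 0;
}
#endif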

static struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	vma->vm_ops = &btrfs_file_vm_ops;
	file_accessed(filp);
	return 0;
}

struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};