xref: /openbmc/linux/fs/btrfs/file.c (revision 9d56dd3b083a3bec56e9da35ce07baca81030b03)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/fs.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/time.h>
23 #include <linux/init.h>
24 #include <linux/string.h>
25 #include <linux/backing-dev.h>
26 #include <linux/mpage.h>
27 #include <linux/swap.h>
28 #include <linux/writeback.h>
29 #include <linux/statfs.h>
30 #include <linux/compat.h>
31 #include "ctree.h"
32 #include "disk-io.h"
33 #include "transaction.h"
34 #include "btrfs_inode.h"
35 #include "ioctl.h"
36 #include "print-tree.h"
37 #include "tree-log.h"
38 #include "locking.h"
39 #include "compat.h"
40 
41 
42 /* simple helper to fault in pages and copy.  This should go away
43  * and be replaced with calls into generic code.
44  */
45 static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
46 					 int write_bytes,
47 					 struct page **prepared_pages,
48 					 const char __user *buf)
49 {
50 	long page_fault = 0;
51 	int i;
52 	int offset = pos & (PAGE_CACHE_SIZE - 1);
53 
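	/* offset is the byte offset into the first page; every later page starts at 0 */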
54 	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
55 		size_t count = min_t(size_t,
56 				     PAGE_CACHE_SIZE - offset, write_bytes);
57 		struct page *page = prepared_pages[i];
58 		fault_in_pages_readable(buf, count);
59 
60 		/* Copy data from userspace to the current page */
61 		kmap(page);
62 		page_fault = __copy_from_user(page_address(page) + offset,
63 					      buf, count);
64 		/* Flush processor's dcache for this page */
65 		flush_dcache_page(page);
66 		kunmap(page);
67 		buf += count;
68 		write_bytes -= count;
69 
70 		if (page_fault)
71 			break;
72 	}
73 	return page_fault ? -EFAULT : 0;
74 }
75 
76 /*
77  * unlocks pages after btrfs_file_write is done with them
78  */
79 static noinline void btrfs_drop_pages(struct page **pages, size_t num_pages)
80 {
81 	size_t i;
82 	for (i = 0; i < num_pages; i++) {
83 		if (!pages[i])
84 			break;
85 		/* PageChecked is some magic around finding pages that
86 		 * have been modified without going through btrfs_set_page_dirty;
87 		 * clear it here
88 		 */
89 		ClearPageChecked(pages[i]);
90 		unlock_page(pages[i]);
91 		mark_page_accessed(pages[i]);
92 		page_cache_release(pages[i]);
93 	}
94 }
95 
96 /*
97  * after copy_from_user, pages need to be dirtied and we need to make
98  * sure holes are created between the current EOF and the start of
99  * any next extents (if required).
100  *
101  * this also makes the decision about creating an inline extent vs
102  * doing real data extents, marking pages dirty and delalloc as required.
103  */
104 static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
105 				   struct btrfs_root *root,
106 				   struct file *file,
107 				   struct page **pages,
108 				   size_t num_pages,
109 				   loff_t pos,
110 				   size_t write_bytes)
111 {
112 	int err = 0;
113 	int i;
114 	struct inode *inode = fdentry(file)->d_inode;
115 	u64 num_bytes;
116 	u64 start_pos;
117 	u64 end_of_last_block;
118 	u64 end_pos = pos + write_bytes;
119 	loff_t isize = i_size_read(inode);
120 
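	/* round the dirtied range out to sector boundaries */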
121 	start_pos = pos & ~((u64)root->sectorsize - 1);
122 	num_bytes = (write_bytes + pos - start_pos +
123 		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
124 
125 	end_of_last_block = start_pos + num_bytes - 1;
126 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
127 	if (err)
128 		return err;
129 
130 	for (i = 0; i < num_pages; i++) {
131 		struct page *p = pages[i];
132 		SetPageUptodate(p);
133 		ClearPageChecked(p);
134 		set_page_dirty(p);
135 	}
136 	if (end_pos > isize) {
137 		i_size_write(inode, end_pos);
138 		/* we've only changed i_size in ram, and we haven't updated
139 		 * the disk i_size.  There is no need to log the inode
140 		 * at this time.
141 		 */
142 	}
143 	return err;
144 }
145 
146 /*
147  * this drops all the extents in the cache that intersect the range
148  * [start, end].  Existing extents are split as required.
149  */
150 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
151 			    int skip_pinned)
152 {
153 	struct extent_map *em;
154 	struct extent_map *split = NULL;
155 	struct extent_map *split2 = NULL;
156 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
157 	u64 len = end - start + 1;
158 	int ret;
159 	int testend = 1;
160 	unsigned long flags;
161 	int compressed = 0;
162 
163 	WARN_ON(end < start);
164 	if (end == (u64)-1) {
165 		len = (u64)-1;
166 		testend = 0;
167 	}
168 	while (1) {
169 		if (!split)
170 			split = alloc_extent_map(GFP_NOFS);
171 		if (!split2)
172 			split2 = alloc_extent_map(GFP_NOFS);
173 
174 		write_lock(&em_tree->lock);
175 		em = lookup_extent_mapping(em_tree, start, len);
176 		if (!em) {
177 			write_unlock(&em_tree->lock);
178 			break;
179 		}
180 		flags = em->flags;
181 		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
182 			if (testend && em->start + em->len >= start + len) {
183 				free_extent_map(em);
184 				write_unlock(&em_tree->lock);
185 				break;
186 			}
187 			start = em->start + em->len;
188 			if (testend)
189 				len = start + len - (em->start + em->len);
190 			free_extent_map(em);
191 			write_unlock(&em_tree->lock);
192 			continue;
193 		}
194 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
195 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
196 		remove_extent_mapping(em_tree, em);
197 
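		/* the extent map starts before the drop range: keep the piece in front */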
198 		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
199 		    em->start < start) {
200 			split->start = em->start;
201 			split->len = start - em->start;
202 			split->orig_start = em->orig_start;
203 			split->block_start = em->block_start;
204 
205 			if (compressed)
206 				split->block_len = em->block_len;
207 			else
208 				split->block_len = split->len;
209 
210 			split->bdev = em->bdev;
211 			split->flags = flags;
212 			ret = add_extent_mapping(em_tree, split);
213 			BUG_ON(ret);
214 			free_extent_map(split);
215 			split = split2;
216 			split2 = NULL;
217 		}
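		/* the extent map extends past the drop range: keep the tail piece */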
218 		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
219 		    testend && em->start + em->len > start + len) {
220 			u64 diff = start + len - em->start;
221 
222 			split->start = start + len;
223 			split->len = em->start + em->len - (start + len);
224 			split->bdev = em->bdev;
225 			split->flags = flags;
226 
227 			if (compressed) {
228 				split->block_len = em->block_len;
229 				split->block_start = em->block_start;
230 				split->orig_start = em->orig_start;
231 			} else {
232 				split->block_len = split->len;
233 				split->block_start = em->block_start + diff;
234 				split->orig_start = split->start;
235 			}
236 
237 			ret = add_extent_mapping(em_tree, split);
238 			BUG_ON(ret);
239 			free_extent_map(split);
240 			split = NULL;
241 		}
242 		write_unlock(&em_tree->lock);
243 
244 		/* once for us */
245 		free_extent_map(em);
246 		/* once for the tree */
247 		free_extent_map(em);
248 	}
249 	if (split)
250 		free_extent_map(split);
251 	if (split2)
252 		free_extent_map(split2);
253 	return 0;
254 }
255 
256 /*
257  * this is very complex, but the basic idea is to drop all extents
258  * in the range start - end.  hint_byte is filled in with a disk byte offset
259  * that would be a good hint to the block allocator for this file.
260  *
261  * If an extent intersects the range but is not entirely inside the range
262  * it is either truncated or split.  Anything entirely inside the range
263  * is deleted from the tree.
264  */
265 int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
266 		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
267 {
268 	struct btrfs_root *root = BTRFS_I(inode)->root;
269 	struct extent_buffer *leaf;
270 	struct btrfs_file_extent_item *fi;
271 	struct btrfs_path *path;
272 	struct btrfs_key key;
273 	struct btrfs_key new_key;
274 	u64 search_start = start;
275 	u64 disk_bytenr = 0;
276 	u64 num_bytes = 0;
277 	u64 extent_offset = 0;
278 	u64 extent_end = 0;
279 	int del_nr = 0;
280 	int del_slot = 0;
281 	int extent_type;
282 	int recow;
283 	int ret;
284 
285 	if (drop_cache)
286 		btrfs_drop_extent_cache(inode, start, end - 1, 0);
287 
288 	path = btrfs_alloc_path();
289 	if (!path)
290 		return -ENOMEM;
291 
292 	while (1) {
293 		recow = 0;
294 		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
295 					       search_start, -1);
296 		if (ret < 0)
297 			break;
298 		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
299 			leaf = path->nodes[0];
300 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
301 			if (key.objectid == inode->i_ino &&
302 			    key.type == BTRFS_EXTENT_DATA_KEY)
303 				path->slots[0]--;
304 		}
305 		ret = 0;
306 next_slot:
307 		leaf = path->nodes[0];
308 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
309 			BUG_ON(del_nr > 0);
310 			ret = btrfs_next_leaf(root, path);
311 			if (ret < 0)
312 				break;
313 			if (ret > 0) {
314 				ret = 0;
315 				break;
316 			}
317 			leaf = path->nodes[0];
318 			recow = 1;
319 		}
320 
321 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
322 		if (key.objectid > inode->i_ino ||
323 		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
324 			break;
325 
326 		fi = btrfs_item_ptr(leaf, path->slots[0],
327 				    struct btrfs_file_extent_item);
328 		extent_type = btrfs_file_extent_type(leaf, fi);
329 
330 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
331 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
332 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
333 			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
334 			extent_offset = btrfs_file_extent_offset(leaf, fi);
335 			extent_end = key.offset +
336 				btrfs_file_extent_num_bytes(leaf, fi);
337 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
338 			extent_end = key.offset +
339 				btrfs_file_extent_inline_len(leaf, fi);
340 		} else {
341 			WARN_ON(1);
342 			extent_end = search_start;
343 		}
344 
345 		if (extent_end <= search_start) {
346 			path->slots[0]++;
347 			goto next_slot;
348 		}
349 
350 		search_start = max(key.offset, start);
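		/*
		 * if btrfs_next_leaf walked us onto a new leaf, redo the search
		 * so the leaf is COWed before we start modifying it
		 */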
351 		if (recow) {
352 			btrfs_release_path(root, path);
353 			continue;
354 		}
355 
356 		/*
357 		 *     | - range to drop - |
358 		 *  | -------- extent -------- |
359 		 */
360 		if (start > key.offset && end < extent_end) {
361 			BUG_ON(del_nr > 0);
362 			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
363 
364 			memcpy(&new_key, &key, sizeof(new_key));
365 			new_key.offset = start;
366 			ret = btrfs_duplicate_item(trans, root, path,
367 						   &new_key);
368 			if (ret == -EAGAIN) {
369 				btrfs_release_path(root, path);
370 				continue;
371 			}
372 			if (ret < 0)
373 				break;
374 
375 			leaf = path->nodes[0];
376 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
377 					    struct btrfs_file_extent_item);
378 			btrfs_set_file_extent_num_bytes(leaf, fi,
379 							start - key.offset);
380 
381 			fi = btrfs_item_ptr(leaf, path->slots[0],
382 					    struct btrfs_file_extent_item);
383 
384 			extent_offset += start - key.offset;
385 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
386 			btrfs_set_file_extent_num_bytes(leaf, fi,
387 							extent_end - start);
388 			btrfs_mark_buffer_dirty(leaf);
389 
390 			if (disk_bytenr > 0) {
391 				ret = btrfs_inc_extent_ref(trans, root,
392 						disk_bytenr, num_bytes, 0,
393 						root->root_key.objectid,
394 						new_key.objectid,
395 						start - extent_offset);
396 				BUG_ON(ret);
397 				*hint_byte = disk_bytenr;
398 			}
399 			key.offset = start;
400 		}
401 		/*
402 		 *  | ---- range to drop ----- |
403 		 *      | -------- extent -------- |
404 		 */
405 		if (start <= key.offset && end < extent_end) {
406 			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
407 
408 			memcpy(&new_key, &key, sizeof(new_key));
409 			new_key.offset = end;
410 			btrfs_set_item_key_safe(trans, root, path, &new_key);
411 
412 			extent_offset += end - key.offset;
413 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
414 			btrfs_set_file_extent_num_bytes(leaf, fi,
415 							extent_end - end);
416 			btrfs_mark_buffer_dirty(leaf);
417 			if (disk_bytenr > 0) {
418 				inode_sub_bytes(inode, end - key.offset);
419 				*hint_byte = disk_bytenr;
420 			}
421 			break;
422 		}
423 
424 		search_start = extent_end;
425 		/*
426 		 *       | ---- range to drop ----- |
427 		 *  | -------- extent -------- |
428 		 */
429 		if (start > key.offset && end >= extent_end) {
430 			BUG_ON(del_nr > 0);
431 			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);
432 
433 			btrfs_set_file_extent_num_bytes(leaf, fi,
434 							start - key.offset);
435 			btrfs_mark_buffer_dirty(leaf);
436 			if (disk_bytenr > 0) {
437 				inode_sub_bytes(inode, extent_end - start);
438 				*hint_byte = disk_bytenr;
439 			}
440 			if (end == extent_end)
441 				break;
442 
443 			path->slots[0]++;
444 			goto next_slot;
445 		}
446 
447 		/*
448 		 *  | ---- range to drop ----- |
449 		 *    | ------ extent ------ |
450 		 */
451 		if (start <= key.offset && end >= extent_end) {
452 			if (del_nr == 0) {
453 				del_slot = path->slots[0];
454 				del_nr = 1;
455 			} else {
456 				BUG_ON(del_slot + del_nr != path->slots[0]);
457 				del_nr++;
458 			}
459 
460 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
461 				inode_sub_bytes(inode,
462 						extent_end - key.offset);
463 				extent_end = ALIGN(extent_end,
464 						   root->sectorsize);
465 			} else if (disk_bytenr > 0) {
466 				ret = btrfs_free_extent(trans, root,
467 						disk_bytenr, num_bytes, 0,
468 						root->root_key.objectid,
469 						key.objectid, key.offset -
470 						extent_offset);
471 				BUG_ON(ret);
472 				inode_sub_bytes(inode,
473 						extent_end - key.offset);
474 				*hint_byte = disk_bytenr;
475 			}
476 
477 			if (end == extent_end)
478 				break;
479 
480 			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
481 				path->slots[0]++;
482 				goto next_slot;
483 			}
484 
485 			ret = btrfs_del_items(trans, root, path, del_slot,
486 					      del_nr);
487 			BUG_ON(ret);
488 
489 			del_nr = 0;
490 			del_slot = 0;
491 
492 			btrfs_release_path(root, path);
493 			continue;
494 		}
495 
496 		BUG_ON(1);
497 	}
498 
499 	if (del_nr > 0) {
500 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
501 		BUG_ON(ret);
502 	}
503 
504 	btrfs_free_path(path);
505 	return ret;
506 }
507 
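/*
 * helper to check whether the file extent item in 'slot' is a plain
 * (uncompressed, unencrypted) regular extent backed by 'bytenr' whose
 * range lines up with *start/*end, so the caller can merge with it
 */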
508 static int extent_mergeable(struct extent_buffer *leaf, int slot,
509 			    u64 objectid, u64 bytenr, u64 *start, u64 *end)
510 {
511 	struct btrfs_file_extent_item *fi;
512 	struct btrfs_key key;
513 	u64 extent_end;
514 
515 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
516 		return 0;
517 
518 	btrfs_item_key_to_cpu(leaf, &key, slot);
519 	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
520 		return 0;
521 
522 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
523 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
524 	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
525 	    btrfs_file_extent_compression(leaf, fi) ||
526 	    btrfs_file_extent_encryption(leaf, fi) ||
527 	    btrfs_file_extent_other_encoding(leaf, fi))
528 		return 0;
529 
530 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
531 	if ((*start && *start != key.offset) || (*end && *end != extent_end))
532 		return 0;
533 
534 	*start = key.offset;
535 	*end = extent_end;
536 	return 1;
537 }
538 
539 /*
540  * Mark extent in the range start - end as written.
541  *
542  * This changes the extent type from 'pre-allocated' to 'regular'. If only
543  * part of the extent is marked as written, the extent will be split into
544  * two or three.
545  */
546 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
547 			      struct inode *inode, u64 start, u64 end)
548 {
549 	struct btrfs_root *root = BTRFS_I(inode)->root;
550 	struct extent_buffer *leaf;
551 	struct btrfs_path *path;
552 	struct btrfs_file_extent_item *fi;
553 	struct btrfs_key key;
554 	struct btrfs_key new_key;
555 	u64 bytenr;
556 	u64 num_bytes;
557 	u64 extent_end;
558 	u64 orig_offset;
559 	u64 other_start;
560 	u64 other_end;
561 	u64 split;
562 	int del_nr = 0;
563 	int del_slot = 0;
564 	int ret;
565 
566 	btrfs_drop_extent_cache(inode, start, end - 1, 0);
567 
568 	path = btrfs_alloc_path();
569 	BUG_ON(!path);
570 again:
571 	split = start;
572 	key.objectid = inode->i_ino;
573 	key.type = BTRFS_EXTENT_DATA_KEY;
574 	key.offset = split;
575 
576 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
577 	if (ret > 0 && path->slots[0] > 0)
578 		path->slots[0]--;
579 
580 	leaf = path->nodes[0];
581 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
582 	BUG_ON(key.objectid != inode->i_ino ||
583 	       key.type != BTRFS_EXTENT_DATA_KEY);
584 	fi = btrfs_item_ptr(leaf, path->slots[0],
585 			    struct btrfs_file_extent_item);
586 	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
587 	       BTRFS_FILE_EXTENT_PREALLOC);
588 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
589 	BUG_ON(key.offset > start || extent_end < end);
590 
591 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
592 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
593 	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
594 
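	/*
	 * keep splitting the preallocated extent until the written range
	 * [start, end) is covered by its own file extent item
	 */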
595 	while (start > key.offset || end < extent_end) {
596 		if (key.offset == start)
597 			split = end;
598 
599 		memcpy(&new_key, &key, sizeof(new_key));
600 		new_key.offset = split;
601 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
602 		if (ret == -EAGAIN) {
603 			btrfs_release_path(root, path);
604 			goto again;
605 		}
606 		BUG_ON(ret < 0);
607 
608 		leaf = path->nodes[0];
609 		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
610 				    struct btrfs_file_extent_item);
611 		btrfs_set_file_extent_num_bytes(leaf, fi,
612 						split - key.offset);
613 
614 		fi = btrfs_item_ptr(leaf, path->slots[0],
615 				    struct btrfs_file_extent_item);
616 
617 		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
618 		btrfs_set_file_extent_num_bytes(leaf, fi,
619 						extent_end - split);
620 		btrfs_mark_buffer_dirty(leaf);
621 
622 		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
623 					   root->root_key.objectid,
624 					   inode->i_ino, orig_offset);
625 		BUG_ON(ret);
626 
627 		if (split == start) {
628 			key.offset = start;
629 		} else {
630 			BUG_ON(start != key.offset);
631 			path->slots[0]--;
632 			extent_end = end;
633 		}
634 	}
635 
636 	fi = btrfs_item_ptr(leaf, path->slots[0],
637 			    struct btrfs_file_extent_item);
638 
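	/*
	 * the written range now has its own item; see if it can be merged
	 * with neighbouring items that point at the same disk extent
	 */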
639 	other_start = end;
640 	other_end = 0;
641 	if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
642 			     bytenr, &other_start, &other_end)) {
643 		extent_end = other_end;
644 		del_slot = path->slots[0] + 1;
645 		del_nr++;
646 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
647 					0, root->root_key.objectid,
648 					inode->i_ino, orig_offset);
649 		BUG_ON(ret);
650 	}
651 	other_start = 0;
652 	other_end = start;
653 	if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
654 			     bytenr, &other_start, &other_end)) {
655 		key.offset = other_start;
656 		del_slot = path->slots[0];
657 		del_nr++;
658 		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
659 					0, root->root_key.objectid,
660 					inode->i_ino, orig_offset);
661 		BUG_ON(ret);
662 	}
663 	if (del_nr == 0) {
664 		btrfs_set_file_extent_type(leaf, fi,
665 					   BTRFS_FILE_EXTENT_REG);
666 		btrfs_mark_buffer_dirty(leaf);
667 		goto out;
668 	}
669 
670 	fi = btrfs_item_ptr(leaf, del_slot - 1,
671 			    struct btrfs_file_extent_item);
672 	btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
673 	btrfs_set_file_extent_num_bytes(leaf, fi,
674 					extent_end - key.offset);
675 	btrfs_mark_buffer_dirty(leaf);
676 
677 	ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
678 	BUG_ON(ret);
679 out:
680 	btrfs_free_path(path);
681 	return 0;
682 }
683 
684 /*
685  * this gets pages into the page cache and locks them down; it also properly
686  * waits for data=ordered extents to finish before allowing the pages to be
687  * modified.
688  */
689 static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
690 			 struct page **pages, size_t num_pages,
691 			 loff_t pos, unsigned long first_index,
692 			 unsigned long last_index, size_t write_bytes)
693 {
694 	int i;
695 	unsigned long index = pos >> PAGE_CACHE_SHIFT;
696 	struct inode *inode = fdentry(file)->d_inode;
697 	int err = 0;
698 	u64 start_pos;
699 	u64 last_pos;
700 
701 	start_pos = pos & ~((u64)root->sectorsize - 1);
702 	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
703 
704 	if (start_pos > inode->i_size) {
705 		err = btrfs_cont_expand(inode, start_pos);
706 		if (err)
707 			return err;
708 	}
709 
710 	memset(pages, 0, num_pages * sizeof(struct page *));
711 again:
712 	for (i = 0; i < num_pages; i++) {
713 		pages[i] = grab_cache_page(inode->i_mapping, index + i);
714 		if (!pages[i]) {
715 			err = -ENOMEM;
716 			BUG_ON(1);
717 		}
718 		wait_on_page_writeback(pages[i]);
719 	}
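	/*
	 * if an ordered extent still overlaps the range, drop the pages,
	 * wait for the ordered IO to finish and start over
	 */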
720 	if (start_pos < inode->i_size) {
721 		struct btrfs_ordered_extent *ordered;
722 		lock_extent(&BTRFS_I(inode)->io_tree,
723 			    start_pos, last_pos - 1, GFP_NOFS);
724 		ordered = btrfs_lookup_first_ordered_extent(inode,
725 							    last_pos - 1);
726 		if (ordered &&
727 		    ordered->file_offset + ordered->len > start_pos &&
728 		    ordered->file_offset < last_pos) {
729 			btrfs_put_ordered_extent(ordered);
730 			unlock_extent(&BTRFS_I(inode)->io_tree,
731 				      start_pos, last_pos - 1, GFP_NOFS);
732 			for (i = 0; i < num_pages; i++) {
733 				unlock_page(pages[i]);
734 				page_cache_release(pages[i]);
735 			}
736 			btrfs_wait_ordered_range(inode, start_pos,
737 						 last_pos - start_pos);
738 			goto again;
739 		}
740 		if (ordered)
741 			btrfs_put_ordered_extent(ordered);
742 
743 		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
744 				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
745 				  EXTENT_DO_ACCOUNTING,
746 				  GFP_NOFS);
747 		unlock_extent(&BTRFS_I(inode)->io_tree,
748 			      start_pos, last_pos - 1, GFP_NOFS);
749 	}
750 	for (i = 0; i < num_pages; i++) {
751 		clear_page_dirty_for_io(pages[i]);
752 		set_page_extent_mapped(pages[i]);
753 		WARN_ON(!PageLocked(pages[i]));
754 	}
755 	return 0;
756 }
757 
758 static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
759 				size_t count, loff_t *ppos)
760 {
761 	loff_t pos;
762 	loff_t start_pos;
763 	ssize_t num_written = 0;
764 	ssize_t err = 0;
765 	int ret = 0;
766 	struct inode *inode = fdentry(file)->d_inode;
767 	struct btrfs_root *root = BTRFS_I(inode)->root;
768 	struct page **pages = NULL;
769 	int nrptrs;
770 	struct page *pinned[2];
771 	unsigned long first_index;
772 	unsigned long last_index;
773 	int will_write;
774 
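	/* O_SYNC/O_DSYNC and O_DIRECT writes must be on disk before we return */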
775 	will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
776 		      (file->f_flags & O_DIRECT));
777 
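	/* cap the page pointer array at one page worth of pointers */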
778 	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
779 		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
780 	pinned[0] = NULL;
781 	pinned[1] = NULL;
782 
783 	pos = *ppos;
784 	start_pos = pos;
785 
786 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
787 
788 	/* do the reserve before the mutex lock in case we have to do some
789 	 * flushing.  We wouldn't deadlock, but this is more polite.
790 	 */
791 	err = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
792 	if (err)
793 		goto out_nolock;
794 
795 	mutex_lock(&inode->i_mutex);
796 
797 	current->backing_dev_info = inode->i_mapping->backing_dev_info;
798 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
799 	if (err)
800 		goto out;
801 
802 	if (count == 0)
803 		goto out;
804 
805 	err = file_remove_suid(file);
806 	if (err)
807 		goto out;
808 
809 	file_update_time(file);
810 
811 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
812 
813 	/* generic_write_checks can change our pos */
814 	start_pos = pos;
815 
816 	BTRFS_I(inode)->sequence++;
817 	first_index = pos >> PAGE_CACHE_SHIFT;
818 	last_index = (pos + count) >> PAGE_CACHE_SHIFT;
819 
820 	/*
821 	 * there are lots of better ways to do this, but this code
822 	 * makes sure the first and last page in the file range are
823 	 * up to date and ready for cow
824 	 */
825 	if ((pos & (PAGE_CACHE_SIZE - 1))) {
826 		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
827 		if (!PageUptodate(pinned[0])) {
828 			ret = btrfs_readpage(NULL, pinned[0]);
829 			BUG_ON(ret);
830 			wait_on_page_locked(pinned[0]);
831 		} else {
832 			unlock_page(pinned[0]);
833 		}
834 	}
835 	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
836 		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
837 		if (!PageUptodate(pinned[1])) {
838 			ret = btrfs_readpage(NULL, pinned[1]);
839 			BUG_ON(ret);
840 			wait_on_page_locked(pinned[1]);
841 		} else {
842 			unlock_page(pinned[1]);
843 		}
844 	}
845 
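	/*
	 * main write loop: reserve data space, lock down the pages, copy the
	 * user data in and mark everything dirty and delalloc, one chunk of
	 * up to nrptrs pages at a time
	 */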
846 	while (count > 0) {
847 		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
848 		size_t write_bytes = min(count, nrptrs *
849 					(size_t)PAGE_CACHE_SIZE -
850 					 offset);
851 		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
852 					PAGE_CACHE_SHIFT;
853 
854 		WARN_ON(num_pages > nrptrs);
855 		memset(pages, 0, sizeof(struct page *) * nrptrs);
856 
857 		ret = btrfs_check_data_free_space(root, inode, write_bytes);
858 		if (ret)
859 			goto out;
860 
861 		ret = prepare_pages(root, file, pages, num_pages,
862 				    pos, first_index, last_index,
863 				    write_bytes);
864 		if (ret) {
865 			btrfs_free_reserved_data_space(root, inode,
866 						       write_bytes);
867 			goto out;
868 		}
869 
870 		ret = btrfs_copy_from_user(pos, num_pages,
871 					   write_bytes, pages, buf);
872 		if (ret) {
873 			btrfs_free_reserved_data_space(root, inode,
874 						       write_bytes);
875 			btrfs_drop_pages(pages, num_pages);
876 			goto out;
877 		}
878 
879 		ret = dirty_and_release_pages(NULL, root, file, pages,
880 					      num_pages, pos, write_bytes);
881 		btrfs_drop_pages(pages, num_pages);
882 		if (ret) {
883 			btrfs_free_reserved_data_space(root, inode,
884 						       write_bytes);
885 			goto out;
886 		}
887 
888 		if (will_write) {
889 			filemap_fdatawrite_range(inode->i_mapping, pos,
890 						 pos + write_bytes - 1);
891 		} else {
892 			balance_dirty_pages_ratelimited_nr(inode->i_mapping,
893 							   num_pages);
894 			if (num_pages <
895 			    (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
896 				btrfs_btree_balance_dirty(root, 1);
897 			btrfs_throttle(root);
898 		}
899 
900 		buf += write_bytes;
901 		count -= write_bytes;
902 		pos += write_bytes;
903 		num_written += write_bytes;
904 
905 		cond_resched();
906 	}
907 out:
908 	mutex_unlock(&inode->i_mutex);
909 	if (ret)
910 		err = ret;
911 	btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
912 
913 out_nolock:
914 	kfree(pages);
915 	if (pinned[0])
916 		page_cache_release(pinned[0]);
917 	if (pinned[1])
918 		page_cache_release(pinned[1]);
919 	*ppos = pos;
920 
921 	/*
922 	 * we want to make sure fsync finds this change
923 	 * but we haven't joined a transaction running right now.
924 	 *
925 	 * Later on, someone is sure to update the inode and get the
926 	 * real transid recorded.
927 	 *
928 	 * We set last_trans now to the fs_info generation + 1,
929 	 * this will either be one more than the running transaction
930 	 * or the generation used for the next transaction if there isn't
931 	 * one running right now.
932 	 */
933 	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
934 
935 	if (num_written > 0 && will_write) {
936 		struct btrfs_trans_handle *trans;
937 
938 		err = btrfs_wait_ordered_range(inode, start_pos, num_written);
939 		if (err)
940 			num_written = err;
941 
942 		if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
943 			trans = btrfs_start_transaction(root, 1);
944 			ret = btrfs_log_dentry_safe(trans, root,
945 						    file->f_dentry);
946 			if (ret == 0) {
947 				ret = btrfs_sync_log(trans, root);
948 				if (ret == 0)
949 					btrfs_end_transaction(trans, root);
950 				else
951 					btrfs_commit_transaction(trans, root);
952 			} else if (ret != BTRFS_NO_LOG_SYNC) {
953 				btrfs_commit_transaction(trans, root);
954 			} else {
955 				btrfs_end_transaction(trans, root);
956 			}
957 		}
958 		if (file->f_flags & O_DIRECT) {
959 			invalidate_mapping_pages(inode->i_mapping,
960 			      start_pos >> PAGE_CACHE_SHIFT,
961 			     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
962 		}
963 	}
964 	current->backing_dev_info = NULL;
965 	return num_written ? num_written : err;
966 }
967 
968 int btrfs_release_file(struct inode *inode, struct file *filp)
969 {
970 	/*
971 	 * ordered_data_close is set by setattr when we are about to truncate
972 	 * a file from a non-zero size to a zero size.  This tries to
973 	 * flush down new bytes that may have been written if the
974 	 * application were using truncate to replace a file in place.
975 	 */
976 	if (BTRFS_I(inode)->ordered_data_close) {
977 		BTRFS_I(inode)->ordered_data_close = 0;
978 		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
979 		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
980 			filemap_flush(inode->i_mapping);
981 	}
982 	if (filp->private_data)
983 		btrfs_ioctl_trans_end(filp);
984 	return 0;
985 }
986 
987 /*
988  * fsync call for both files and directories.  This logs the inode into
989  * the tree log instead of forcing full commits whenever possible.
990  *
991  * It needs to call filemap_fdatawait so that all ordered extent updates
992  * in the metadata btree are up to date for copying to the log.
993  *
994  * It drops the inode mutex before doing the tree log commit.  This is an
995  * important optimization for directories because holding the mutex prevents
996  * new operations on the dir while we write to disk.
997  */
998 int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
999 {
1000 	struct inode *inode = dentry->d_inode;
1001 	struct btrfs_root *root = BTRFS_I(inode)->root;
1002 	int ret = 0;
1003 	struct btrfs_trans_handle *trans;
1004 
1005 
1006 	/* we wait first, since the writeback may change the inode */
1007 	root->log_batch++;
1008 	/* the VFS called filemap_fdatawrite for us */
1009 	btrfs_wait_ordered_range(inode, 0, (u64)-1);
1010 	root->log_batch++;
1011 
1012 	/*
1013 	 * check the transaction that last modified this inode
1014 	 * and see if it's already been committed
1015 	 */
1016 	if (!BTRFS_I(inode)->last_trans)
1017 		goto out;
1018 
1019 	/*
1020 	 * if the last transaction that changed this file was before
1021 	 * the current transaction, we can bail out now without any
1022 	 * syncing
1023 	 */
1024 	mutex_lock(&root->fs_info->trans_mutex);
1025 	if (BTRFS_I(inode)->last_trans <=
1026 	    root->fs_info->last_trans_committed) {
1027 		BTRFS_I(inode)->last_trans = 0;
1028 		mutex_unlock(&root->fs_info->trans_mutex);
1029 		goto out;
1030 	}
1031 	mutex_unlock(&root->fs_info->trans_mutex);
1032 
1033 	/*
1034 	 * ok we haven't committed the transaction yet, lets do a commit
1035 	 * ok we haven't committed the transaction yet, let's do a commit
1036 	if (file && file->private_data)
1037 		btrfs_ioctl_trans_end(file);
1038 
1039 	trans = btrfs_start_transaction(root, 1);
1040 	if (!trans) {
1041 		ret = -ENOMEM;
1042 		goto out;
1043 	}
1044 
1045 	ret = btrfs_log_dentry_safe(trans, root, dentry);
1046 	if (ret < 0)
1047 		goto out;
1048 
1049 	/* we've logged all the items and now have a consistent
1050 	 * version of the file in the log.  It is possible that
1051 	 * someone will come in and modify the file, but that's
1052 	 * fine because the log is consistent on disk, and we
1053 	 * have references to all of the file's extents
1054 	 *
1055 	 * It is possible that someone will come in and log the
1056 	 * file again, but that will end up using the synchronization
1057 	 * inside btrfs_sync_log to keep things safe.
1058 	 */
1059 	mutex_unlock(&dentry->d_inode->i_mutex);
1060 
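	/*
	 * BTRFS_NO_LOG_SYNC means nothing was written to the tree log, so the
	 * transaction is just ended; a return > 0 means the log could not be
	 * used and we fall back to a full transaction commit
	 */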
1061 	if (ret != BTRFS_NO_LOG_SYNC) {
1062 		if (ret > 0) {
1063 			ret = btrfs_commit_transaction(trans, root);
1064 		} else {
1065 			ret = btrfs_sync_log(trans, root);
1066 			if (ret == 0)
1067 				ret = btrfs_end_transaction(trans, root);
1068 			else
1069 				ret = btrfs_commit_transaction(trans, root);
1070 		}
1071 	} else {
1072 		ret = btrfs_end_transaction(trans, root);
1073 	}
1074 	mutex_lock(&dentry->d_inode->i_mutex);
1075 out:
1076 	return ret > 0 ? -EIO : ret;
1077 }
1078 
1079 static const struct vm_operations_struct btrfs_file_vm_ops = {
1080 	.fault		= filemap_fault,
1081 	.page_mkwrite	= btrfs_page_mkwrite,
1082 };
1083 
1084 static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
1085 {
1086 	vma->vm_ops = &btrfs_file_vm_ops;
1087 	file_accessed(filp);
1088 	return 0;
1089 }
1090 
1091 const struct file_operations btrfs_file_operations = {
1092 	.llseek		= generic_file_llseek,
1093 	.read		= do_sync_read,
1094 	.aio_read       = generic_file_aio_read,
1095 	.splice_read	= generic_file_splice_read,
1096 	.write		= btrfs_file_write,
1097 	.mmap		= btrfs_file_mmap,
1098 	.open		= generic_file_open,
1099 	.release	= btrfs_release_file,
1100 	.fsync		= btrfs_sync_file,
1101 	.unlocked_ioctl	= btrfs_ioctl,
1102 #ifdef CONFIG_COMPAT
1103 	.compat_ioctl	= btrfs_ioctl,
1104 #endif
1105 };
1106