xref: /openbmc/linux/fs/btrfs/file.c (revision e02119d5a7b4396c5a872582fddc8bd6d305a70a)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 
19 #include <linux/fs.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/time.h>
23 #include <linux/init.h>
24 #include <linux/string.h>
25 #include <linux/smp_lock.h>
26 #include <linux/backing-dev.h>
27 #include <linux/mpage.h>
28 #include <linux/swap.h>
29 #include <linux/writeback.h>
30 #include <linux/statfs.h>
31 #include <linux/compat.h>
32 #include <linux/version.h>
33 #include "ctree.h"
34 #include "disk-io.h"
35 #include "transaction.h"
36 #include "btrfs_inode.h"
37 #include "ioctl.h"
38 #include "print-tree.h"
39 #include "tree-log.h"
40 #include "locking.h"
41 #include "compat.h"
42 
43 
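/*
 * copy write_bytes of data from the user buffer into the locked,
 * prepared pages.  Each chunk of the user buffer is faulted in first so
 * the copy is less likely to fault while the destination pages are held
 * locked.  Returns -EFAULT if any part of the copy fails.
 */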
44 static int noinline btrfs_copy_from_user(loff_t pos, int num_pages,
45 					 int write_bytes,
46 					 struct page **prepared_pages,
47 					 const char __user * buf)
48 {
49 	long page_fault = 0;
50 	int i;
51 	int offset = pos & (PAGE_CACHE_SIZE - 1);
52 
53 	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
54 		size_t count = min_t(size_t,
55 				     PAGE_CACHE_SIZE - offset, write_bytes);
56 		struct page *page = prepared_pages[i];
57 		fault_in_pages_readable(buf, count);
58 
59 		/* Copy data from userspace to the current page */
60 		kmap(page);
61 		page_fault = __copy_from_user(page_address(page) + offset,
62 					      buf, count);
63 		/* Flush processor's dcache for this page */
64 		flush_dcache_page(page);
65 		kunmap(page);
66 		buf += count;
67 		write_bytes -= count;
68 
69 		if (page_fault)
70 			break;
71 	}
72 	return page_fault ? -EFAULT : 0;
73 }
74 
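/*
 * unlock the prepared pages and drop our references.  The array may be
 * only partially filled, so stop at the first NULL entry.
 */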
75 static void noinline btrfs_drop_pages(struct page **pages, size_t num_pages)
76 {
77 	size_t i;
78 	for (i = 0; i < num_pages; i++) {
79 		if (!pages[i])
80 			break;
81 		ClearPageChecked(pages[i]);
82 		unlock_page(pages[i]);
83 		mark_page_accessed(pages[i]);
84 		page_cache_release(pages[i]);
85 	}
86 }
87 
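/*
 * copy the data in the pages into an inline file extent item in the
 * btree.  If an inline extent already covers part of the range the item
 * is extended in place, otherwise a new item is inserted at the file
 * offset.
 */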
88 static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
89 				struct btrfs_root *root, struct inode *inode,
90 				u64 offset, size_t size,
91 				struct page **pages, size_t page_offset,
92 				int num_pages)
93 {
94 	struct btrfs_key key;
95 	struct btrfs_path *path;
96 	struct extent_buffer *leaf;
97 	char *kaddr;
98 	unsigned long ptr;
99 	struct btrfs_file_extent_item *ei;
100 	struct page *page;
101 	u32 datasize;
102 	int err = 0;
103 	int ret;
104 	int i;
105 	ssize_t cur_size;
106 
107 	path = btrfs_alloc_path();
108 	if (!path)
109 		return -ENOMEM;
110 
111 	btrfs_set_trans_block_group(trans, inode);
112 
113 	key.objectid = inode->i_ino;
114 	key.offset = offset;
115 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
116 
117 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
118 	if (ret < 0) {
119 		err = ret;
120 		goto fail;
121 	}
122 	if (ret == 1) {
123 		struct btrfs_key found_key;
124 
125 		if (path->slots[0] == 0)
126 			goto insert;
127 
128 		path->slots[0]--;
129 		leaf = path->nodes[0];
130 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
131 
132 		if (found_key.objectid != inode->i_ino)
133 			goto insert;
134 
135 		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
136 			goto insert;
137 		ei = btrfs_item_ptr(leaf, path->slots[0],
138 				    struct btrfs_file_extent_item);
139 
140 		if (btrfs_file_extent_type(leaf, ei) !=
141 		    BTRFS_FILE_EXTENT_INLINE) {
142 			goto insert;
143 		}
144 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
145 		ret = 0;
146 	}
147 	if (ret == 0) {
148 		u32 found_size;
149 		u64 found_end;
150 
151 		leaf = path->nodes[0];
152 		ei = btrfs_item_ptr(leaf, path->slots[0],
153 				    struct btrfs_file_extent_item);
154 
155 		if (btrfs_file_extent_type(leaf, ei) !=
156 		    BTRFS_FILE_EXTENT_INLINE) {
157 			err = ret;
158 			btrfs_print_leaf(root, leaf);
159 			printk("found wasn't inline offset %Lu inode %lu\n",
160 			       offset, inode->i_ino);
161 			goto fail;
162 		}
163 		found_size = btrfs_file_extent_inline_len(leaf,
164 					  btrfs_item_nr(leaf, path->slots[0]));
165 		found_end = key.offset + found_size;
166 
167 		if (found_end < offset + size) {
168 			btrfs_release_path(root, path);
169 			ret = btrfs_search_slot(trans, root, &key, path,
170 						offset + size - found_end, 1);
171 			BUG_ON(ret != 0);
172 
173 			ret = btrfs_extend_item(trans, root, path,
174 						offset + size - found_end);
175 			if (ret) {
176 				err = ret;
177 				goto fail;
178 			}
179 			leaf = path->nodes[0];
180 			ei = btrfs_item_ptr(leaf, path->slots[0],
181 					    struct btrfs_file_extent_item);
182 			inode->i_blocks += (offset + size - found_end) >> 9;
183 		}
184 		if (found_end < offset) {
185 			ptr = btrfs_file_extent_inline_start(ei) + found_size;
186 			memset_extent_buffer(leaf, 0, ptr, offset - found_end);
187 		}
188 	} else {
189 insert:
190 		btrfs_release_path(root, path);
191 		datasize = offset + size - key.offset;
192 		inode->i_blocks += datasize >> 9;
193 		datasize = btrfs_file_extent_calc_inline_size(datasize);
194 		ret = btrfs_insert_empty_item(trans, root, path, &key,
195 					      datasize);
196 		if (ret) {
197 			err = ret;
198 			printk("got bad ret %d\n", ret);
199 			goto fail;
200 		}
201 		leaf = path->nodes[0];
202 		ei = btrfs_item_ptr(leaf, path->slots[0],
203 				    struct btrfs_file_extent_item);
204 		btrfs_set_file_extent_generation(leaf, ei, trans->transid);
205 		btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
206 	}
207 	ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;
208 
209 	cur_size = size;
210 	i = 0;
211 	while (size > 0) {
212 		page = pages[i];
213 		kaddr = kmap_atomic(page, KM_USER0);
214 		cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
215 		write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
216 		kunmap_atomic(kaddr, KM_USER0);
217 		page_offset = 0;
218 		ptr += cur_size;
219 		size -= cur_size;
220 		if (i >= num_pages) {
221 			printk("i %d num_pages %d\n", i, num_pages);
222 		}
223 		i++;
224 	}
225 	btrfs_mark_buffer_dirty(leaf);
226 fail:
227 	btrfs_free_path(path);
228 	return err;
229 }
230 
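/*
 * called after btrfs_copy_from_user has filled the locked pages.  Any
 * hole between the old i_size and the start of the write is filled in,
 * then the range is either marked delalloc and the pages dirtied (so
 * writeback allocates real extents later) or, for small writes, copied
 * directly into an inline extent.  i_size is updated if the file grew.
 */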
231 static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
232 				   struct btrfs_root *root,
233 				   struct file *file,
234 				   struct page **pages,
235 				   size_t num_pages,
236 				   loff_t pos,
237 				   size_t write_bytes)
238 {
239 	int err = 0;
240 	int i;
241 	struct inode *inode = fdentry(file)->d_inode;
242 	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
243 	u64 hint_byte;
244 	u64 num_bytes;
245 	u64 start_pos;
246 	u64 end_of_last_block;
247 	u64 end_pos = pos + write_bytes;
248 	u64 inline_size;
249 	int did_inline = 0;
250 	loff_t isize = i_size_read(inode);
251 
252 	start_pos = pos & ~((u64)root->sectorsize - 1);
253 	num_bytes = (write_bytes + pos - start_pos +
254 		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
255 
256 	end_of_last_block = start_pos + num_bytes - 1;
257 
258 	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
259 	trans = btrfs_join_transaction(root, 1);
260 	if (!trans) {
261 		err = -ENOMEM;
262 		goto out_unlock;
263 	}
264 	btrfs_set_trans_block_group(trans, inode);
265 	hint_byte = 0;
266 
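	/*
	 * start_pos and num_bytes are sectorsize aligned, so the last byte
	 * of the range should never sit on a block boundary.  This sanity
	 * check hardcodes 4K blocks.
	 */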
267 	if ((end_of_last_block & 4095) == 0) {
268 		printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block);
269 	}
270 	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
271 
272 	/* FIXME...EIEIO, ENOSPC and more */
273 	/* insert any holes we need to create */
274 	if (isize < start_pos) {
275 		u64 last_pos_in_file;
276 		u64 hole_size;
277 		u64 mask = root->sectorsize - 1;
278 		last_pos_in_file = (isize + mask) & ~mask;
279 		hole_size = (start_pos - last_pos_in_file + mask) & ~mask;
280 		if (hole_size > 0) {
281 			btrfs_wait_ordered_range(inode, last_pos_in_file,
282 						 last_pos_in_file + hole_size);
283 			mutex_lock(&BTRFS_I(inode)->extent_mutex);
284 			err = btrfs_drop_extents(trans, root, inode,
285 						 last_pos_in_file,
286 						 last_pos_in_file + hole_size,
287 						 last_pos_in_file,
288 						 &hint_byte);
289 			if (err)
290 				goto failed;
291 
292 			err = btrfs_insert_file_extent(trans, root,
293 						       inode->i_ino,
294 						       last_pos_in_file,
295 						       0, 0, hole_size, 0);
296 			btrfs_drop_extent_cache(inode, last_pos_in_file,
297 					last_pos_in_file + hole_size - 1);
298 			mutex_unlock(&BTRFS_I(inode)->extent_mutex);
299 			btrfs_check_file(root, inode);
300 		}
301 		if (err)
302 			goto failed;
303 	}
304 
305 	/*
306 	 * either mark the new bytes delalloc so real extents get allocated
307 	 * at writeback time, or copy small writes inline into the btree
308 	 */
309 	inline_size = end_pos;
310 	if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
311 	    inline_size > root->fs_info->max_inline ||
312 	    (inline_size & (root->sectorsize -1)) == 0 ||
313 	    inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
314 		/* check for reserved extents on each page, we don't want
315 		 * to reset the delalloc bit on things that already have
316 		 * extents reserved.
317 		 */
318 		btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
319 		for (i = 0; i < num_pages; i++) {
320 			struct page *p = pages[i];
321 			SetPageUptodate(p);
322 			ClearPageChecked(p);
323 			set_page_dirty(p);
324 		}
325 	} else {
326 		u64 aligned_end;
327 		/* step one, delete the existing extents in this range */
328 		aligned_end = (pos + write_bytes + root->sectorsize - 1) &
329 			~((u64)root->sectorsize - 1);
330 		mutex_lock(&BTRFS_I(inode)->extent_mutex);
331 		err = btrfs_drop_extents(trans, root, inode, start_pos,
332 					 aligned_end, aligned_end, &hint_byte);
333 		if (err)
334 			goto failed;
335 		if (isize > inline_size)
336 			inline_size = min_t(u64, isize, aligned_end);
337 		inline_size -= start_pos;
338 		err = insert_inline_extent(trans, root, inode, start_pos,
339 					   inline_size, pages, 0, num_pages);
340 		btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
341 		BUG_ON(err);
342 		mutex_unlock(&BTRFS_I(inode)->extent_mutex);
343 
344 		/*
345 		 * an ugly way to do all the prop accounting around
346 		 * the page bits and mapping tags
347 		 */
348 		set_page_writeback(pages[0]);
349 		end_page_writeback(pages[0]);
350 		did_inline = 1;
351 	}
352 	if (end_pos > isize) {
353 		i_size_write(inode, end_pos);
354 		if (did_inline)
355 			BTRFS_I(inode)->disk_i_size = end_pos;
356 		btrfs_update_inode(trans, root, inode);
357 	}
358 failed:
359 	err = btrfs_end_transaction(trans, root);
360 out_unlock:
361 	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
362 	return err;
363 }
364 
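/*
 * remove any cached extent_map structs between start and end from the
 * inode's extent map tree.  Mappings that straddle either edge of the
 * range are split so the cached portions outside the range survive.
 */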
365 int noinline btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
366 {
367 	struct extent_map *em;
368 	struct extent_map *split = NULL;
369 	struct extent_map *split2 = NULL;
370 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
371 	u64 len = end - start + 1;
372 	int ret;
373 	int testend = 1;
374 
375 	WARN_ON(end < start);
376 	if (end == (u64)-1) {
377 		len = (u64)-1;
378 		testend = 0;
379 	}
380 	while(1) {
381 		if (!split)
382 			split = alloc_extent_map(GFP_NOFS);
383 		if (!split2)
384 			split2 = alloc_extent_map(GFP_NOFS);
385 
386 		spin_lock(&em_tree->lock);
387 		em = lookup_extent_mapping(em_tree, start, len);
388 		if (!em) {
389 			spin_unlock(&em_tree->lock);
390 			break;
391 		}
392 		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
393 		remove_extent_mapping(em_tree, em);
394 
395 		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
396 		    em->start < start) {
397 			split->start = em->start;
398 			split->len = start - em->start;
399 			split->block_start = em->block_start;
400 			split->bdev = em->bdev;
401 			split->flags = em->flags;
402 			ret = add_extent_mapping(em_tree, split);
403 			BUG_ON(ret);
404 			free_extent_map(split);
405 			split = split2;
406 			split2 = NULL;
407 		}
408 		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
409 		    testend && em->start + em->len > start + len) {
410 			u64 diff = start + len - em->start;
411 
412 			split->start = start + len;
413 			split->len = em->start + em->len - (start + len);
414 			split->bdev = em->bdev;
415 			split->flags = em->flags;
416 
417 			split->block_start = em->block_start + diff;
418 
419 			ret = add_extent_mapping(em_tree, split);
420 			BUG_ON(ret);
421 			free_extent_map(split);
422 			split = NULL;
423 		}
424 		spin_unlock(&em_tree->lock);
425 
426 		/* once for us */
427 		free_extent_map(em);
428 		/* once for the tree */
429 		free_extent_map(em);
430 	}
431 	if (split)
432 		free_extent_map(split);
433 	if (split2)
434 		free_extent_map(split2);
435 	return 0;
436 }
437 
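/*
 * debugging helper that used to walk the file extent items and complain
 * about gaps or out of order offsets.  The body is compiled out and the
 * function simply returns 0.
 */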
438 int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
439 {
440 	return 0;
441 #if 0
442 	struct btrfs_path *path;
443 	struct btrfs_key found_key;
444 	struct extent_buffer *leaf;
445 	struct btrfs_file_extent_item *extent;
446 	u64 last_offset = 0;
447 	int nritems;
448 	int slot;
449 	int found_type;
450 	int ret;
451 	int err = 0;
452 	u64 extent_end = 0;
453 
454 	path = btrfs_alloc_path();
455 	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
456 				       last_offset, 0);
457 	while(1) {
458 		nritems = btrfs_header_nritems(path->nodes[0]);
459 		if (path->slots[0] >= nritems) {
460 			ret = btrfs_next_leaf(root, path);
461 			if (ret)
462 				goto out;
463 			nritems = btrfs_header_nritems(path->nodes[0]);
464 		}
465 		slot = path->slots[0];
466 		leaf = path->nodes[0];
467 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
468 		if (found_key.objectid != inode->i_ino)
469 			break;
470 		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
471 			goto out;
472 
473 		if (found_key.offset < last_offset) {
474 			WARN_ON(1);
475 			btrfs_print_leaf(root, leaf);
476 			printk("inode %lu found offset %Lu expected %Lu\n",
477 			       inode->i_ino, found_key.offset, last_offset);
478 			err = 1;
479 			goto out;
480 		}
481 		extent = btrfs_item_ptr(leaf, slot,
482 					struct btrfs_file_extent_item);
483 		found_type = btrfs_file_extent_type(leaf, extent);
484 		if (found_type == BTRFS_FILE_EXTENT_REG) {
485 			extent_end = found_key.offset +
486 			     btrfs_file_extent_num_bytes(leaf, extent);
487 		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
488 			struct btrfs_item *item;
489 			item = btrfs_item_nr(leaf, slot);
490 			extent_end = found_key.offset +
491 			     btrfs_file_extent_inline_len(leaf, item);
492 			extent_end = (extent_end + root->sectorsize - 1) &
493 				~((u64)root->sectorsize -1 );
494 		}
495 		last_offset = extent_end;
496 		path->slots[0]++;
497 	}
498 	if (0 && last_offset < inode->i_size) {
499 		WARN_ON(1);
500 		btrfs_print_leaf(root, leaf);
501 		printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
502 		       last_offset, inode->i_size);
503 		err = 1;
504 
505 	}
506 out:
507 	btrfs_free_path(path);
508 	return err;
509 #endif
510 }
511 
512 /*
513  * this is very complex, but the basic idea is to drop all extents
514  * in the range start - end.  hint_byte is filled in with a byte number
515  * that would be a good hint to the block allocator for this file.
516  *
517  * If an extent intersects the range but is not entirely inside the range
518  * it is either truncated or split.  Anything entirely inside the range
519  * is deleted from the tree.
520  */
521 int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
522 		       struct btrfs_root *root, struct inode *inode,
523 		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
524 {
525 	u64 extent_end = 0;
526 	u64 search_start = start;
527 	struct extent_buffer *leaf;
528 	struct btrfs_file_extent_item *extent;
529 	struct btrfs_path *path;
530 	struct btrfs_key key;
531 	struct btrfs_file_extent_item old;
532 	int keep;
533 	int slot;
534 	int bookend;
535 	int found_type;
536 	int found_extent;
537 	int found_inline;
538 	int recow;
539 	int ret;
540 
541 	btrfs_drop_extent_cache(inode, start, end - 1);
542 
543 	path = btrfs_alloc_path();
544 	if (!path)
545 		return -ENOMEM;
546 	while(1) {
547 		recow = 0;
548 		btrfs_release_path(root, path);
549 		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
550 					       search_start, -1);
551 		if (ret < 0)
552 			goto out;
553 		if (ret > 0) {
554 			if (path->slots[0] == 0) {
555 				ret = 0;
556 				goto out;
557 			}
558 			path->slots[0]--;
559 		}
560 next_slot:
561 		keep = 0;
562 		bookend = 0;
563 		found_extent = 0;
564 		found_inline = 0;
565 		extent = NULL;
566 		leaf = path->nodes[0];
567 		slot = path->slots[0];
568 		ret = 0;
569 		btrfs_item_key_to_cpu(leaf, &key, slot);
570 		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY &&
571 		    key.offset >= end) {
572 			goto out;
573 		}
574 		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
575 		    key.objectid != inode->i_ino) {
576 			goto out;
577 		}
578 		if (recow) {
579 			search_start = key.offset;
580 			continue;
581 		}
582 		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
583 			extent = btrfs_item_ptr(leaf, slot,
584 						struct btrfs_file_extent_item);
585 			found_type = btrfs_file_extent_type(leaf, extent);
586 			if (found_type == BTRFS_FILE_EXTENT_REG) {
587 				extent_end =
588 				     btrfs_file_extent_disk_bytenr(leaf,
589 								   extent);
590 				if (extent_end)
591 					*hint_byte = extent_end;
592 
593 				extent_end = key.offset +
594 				     btrfs_file_extent_num_bytes(leaf, extent);
595 				found_extent = 1;
596 			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
597 				struct btrfs_item *item;
598 				item = btrfs_item_nr(leaf, slot);
599 				found_inline = 1;
600 				extent_end = key.offset +
601 				     btrfs_file_extent_inline_len(leaf, item);
602 			}
603 		} else {
604 			extent_end = search_start;
605 		}
606 
607 		/* we found nothing we can drop */
608 		if ((!found_extent && !found_inline) ||
609 		    search_start >= extent_end) {
610 			int nextret;
611 			u32 nritems;
612 			nritems = btrfs_header_nritems(leaf);
613 			if (slot >= nritems - 1) {
614 				nextret = btrfs_next_leaf(root, path);
615 				if (nextret)
616 					goto out;
617 				recow = 1;
618 			} else {
619 				path->slots[0]++;
620 			}
621 			goto next_slot;
622 		}
623 
624 		if (found_inline) {
625 			u64 mask = root->sectorsize - 1;
626 			search_start = (extent_end + mask) & ~mask;
627 		} else
628 			search_start = extent_end;
629 		if (end <= extent_end && start >= key.offset && found_inline) {
630 			*hint_byte = EXTENT_MAP_INLINE;
631 			continue;
632 		}
633 		if (end < extent_end && end >= key.offset) {
634 			if (found_extent) {
635 				u64 disk_bytenr =
636 				    btrfs_file_extent_disk_bytenr(leaf, extent);
637 				u64 disk_num_bytes =
638 				    btrfs_file_extent_disk_num_bytes(leaf,
639 								      extent);
640 				read_extent_buffer(leaf, &old,
641 						   (unsigned long)extent,
642 						   sizeof(old));
643 				if (disk_bytenr != 0) {
644 					ret = btrfs_inc_extent_ref(trans, root,
645 					         disk_bytenr, disk_num_bytes,
646 						 root->root_key.objectid,
647 						 trans->transid,
648 						 key.objectid, end);
649 					BUG_ON(ret);
650 				}
651 			}
652 			bookend = 1;
653 			if (found_inline && start <= key.offset)
654 				keep = 1;
655 		}
656 		/* truncate existing extent */
657 		if (start > key.offset) {
658 			u64 new_num;
659 			u64 old_num;
660 			keep = 1;
661 			WARN_ON(start & (root->sectorsize - 1));
662 			if (found_extent) {
663 				new_num = start - key.offset;
664 				old_num = btrfs_file_extent_num_bytes(leaf,
665 								      extent);
666 				*hint_byte =
667 					btrfs_file_extent_disk_bytenr(leaf,
668 								      extent);
669 				if (btrfs_file_extent_disk_bytenr(leaf,
670 								  extent)) {
671 					dec_i_blocks(inode, old_num - new_num);
672 				}
673 				btrfs_set_file_extent_num_bytes(leaf, extent,
674 								new_num);
675 				btrfs_mark_buffer_dirty(leaf);
676 			} else if (key.offset < inline_limit &&
677 				   (end > extent_end) &&
678 				   (inline_limit < extent_end)) {
679 				u32 new_size;
680 				new_size = btrfs_file_extent_calc_inline_size(
681 						   inline_limit - key.offset);
682 				dec_i_blocks(inode, (extent_end - key.offset) -
683 					(inline_limit - key.offset));
684 				btrfs_truncate_item(trans, root, path,
685 						    new_size, 1);
686 			}
687 		}
688 		/* delete the entire extent */
689 		if (!keep) {
690 			u64 disk_bytenr = 0;
691 			u64 disk_num_bytes = 0;
692 			u64 extent_num_bytes = 0;
693 			u64 root_gen;
694 			u64 root_owner;
695 
696 			root_gen = btrfs_header_generation(leaf);
697 			root_owner = btrfs_header_owner(leaf);
698 			if (found_extent) {
699 				disk_bytenr =
700 				      btrfs_file_extent_disk_bytenr(leaf,
701 								     extent);
702 				disk_num_bytes =
703 				      btrfs_file_extent_disk_num_bytes(leaf,
704 								       extent);
705 				extent_num_bytes =
706 				      btrfs_file_extent_num_bytes(leaf, extent);
707 				*hint_byte =
708 					btrfs_file_extent_disk_bytenr(leaf,
709 								      extent);
710 			}
711 			ret = btrfs_del_item(trans, root, path);
712 			/* TODO update progress marker and return */
713 			BUG_ON(ret);
714 			btrfs_release_path(root, path);
715 			extent = NULL;
716 			if (found_extent && disk_bytenr != 0) {
717 				dec_i_blocks(inode, extent_num_bytes);
718 				ret = btrfs_free_extent(trans, root,
719 						disk_bytenr,
720 						disk_num_bytes,
721 						root_owner,
722 						root_gen, inode->i_ino,
723 						key.offset, 0);
724 			}
725 
726 			BUG_ON(ret);
727 			if (!bookend && search_start >= end) {
728 				ret = 0;
729 				goto out;
730 			}
731 			if (!bookend)
732 				continue;
733 		}
734 		if (bookend && found_inline && start <= key.offset) {
735 			u32 new_size;
736 			new_size = btrfs_file_extent_calc_inline_size(
737 						   extent_end - end);
738 			dec_i_blocks(inode, (extent_end - key.offset) -
739 					(extent_end - end));
740 			btrfs_truncate_item(trans, root, path, new_size, 0);
741 		}
742 		/* create bookend, splitting the extent in two */
743 		if (bookend && found_extent) {
744 			struct btrfs_key ins;
745 			ins.objectid = inode->i_ino;
746 			ins.offset = end;
747 			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
748 			btrfs_release_path(root, path);
749 			ret = btrfs_insert_empty_item(trans, root, path, &ins,
750 						      sizeof(*extent));
751 
752 			leaf = path->nodes[0];
753 			if (ret) {
754 				btrfs_print_leaf(root, leaf);
755 				printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu keep was %d\n", ret , ins.objectid, ins.type, ins.offset, start, end, key.offset, extent_end, keep);
756 			}
757 			BUG_ON(ret);
758 			extent = btrfs_item_ptr(leaf, path->slots[0],
759 						struct btrfs_file_extent_item);
760 			write_extent_buffer(leaf, &old,
761 					    (unsigned long)extent, sizeof(old));
762 
763 			btrfs_set_file_extent_offset(leaf, extent,
764 				    le64_to_cpu(old.offset) + end - key.offset);
765 			WARN_ON(le64_to_cpu(old.num_bytes) <
766 				(extent_end - end));
767 			btrfs_set_file_extent_num_bytes(leaf, extent,
768 							extent_end - end);
769 			btrfs_set_file_extent_type(leaf, extent,
770 						   BTRFS_FILE_EXTENT_REG);
771 
772 			btrfs_mark_buffer_dirty(path->nodes[0]);
773 			if (le64_to_cpu(old.disk_bytenr) != 0) {
774 				inode->i_blocks +=
775 				      btrfs_file_extent_num_bytes(leaf,
776 								  extent) >> 9;
777 			}
778 			ret = 0;
779 			goto out;
780 		}
781 	}
782 out:
783 	btrfs_free_path(path);
784 	btrfs_check_file(root, inode);
785 	return ret;
786 }
787 
788 /*
789  * this gets pages into the page cache and locks them down
790  */
791 static int noinline prepare_pages(struct btrfs_root *root, struct file *file,
792 			 struct page **pages, size_t num_pages,
793 			 loff_t pos, unsigned long first_index,
794 			 unsigned long last_index, size_t write_bytes)
795 {
796 	int i;
797 	unsigned long index = pos >> PAGE_CACHE_SHIFT;
798 	struct inode *inode = fdentry(file)->d_inode;
799 	int err = 0;
800 	u64 start_pos;
801 	u64 last_pos;
802 
803 	start_pos = pos & ~((u64)root->sectorsize - 1);
804 	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;
805 
806 	memset(pages, 0, num_pages * sizeof(struct page *));
807 again:
808 	for (i = 0; i < num_pages; i++) {
809 		pages[i] = grab_cache_page(inode->i_mapping, index + i);
810 		if (!pages[i]) {
811 			err = -ENOMEM;
812 			BUG_ON(1);
813 		}
814 		wait_on_page_writeback(pages[i]);
815 	}
816 	if (start_pos < inode->i_size) {
817 		struct btrfs_ordered_extent *ordered;
818 		lock_extent(&BTRFS_I(inode)->io_tree,
819 			    start_pos, last_pos - 1, GFP_NOFS);
820 		ordered = btrfs_lookup_first_ordered_extent(inode, last_pos - 1);
821 		if (ordered &&
822 		    ordered->file_offset + ordered->len > start_pos &&
823 		    ordered->file_offset < last_pos) {
824 			btrfs_put_ordered_extent(ordered);
825 			unlock_extent(&BTRFS_I(inode)->io_tree,
826 				      start_pos, last_pos - 1, GFP_NOFS);
827 			for (i = 0; i < num_pages; i++) {
828 				unlock_page(pages[i]);
829 				page_cache_release(pages[i]);
830 			}
831 			btrfs_wait_ordered_range(inode, start_pos,
832 						 last_pos - start_pos);
833 			goto again;
834 		}
835 		if (ordered)
836 			btrfs_put_ordered_extent(ordered);
837 
838 		clear_extent_bits(&BTRFS_I(inode)->io_tree, start_pos,
839 				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC,
840 				  GFP_NOFS);
841 		unlock_extent(&BTRFS_I(inode)->io_tree,
842 			      start_pos, last_pos - 1, GFP_NOFS);
843 	}
844 	for (i = 0; i < num_pages; i++) {
845 		clear_page_dirty_for_io(pages[i]);
846 		set_page_extent_mapped(pages[i]);
847 		WARN_ON(!PageLocked(pages[i]));
848 	}
849 	return 0;
850 }
851 
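/*
 * the buffered write path.  Data is copied from userspace into locked
 * page cache pages a chunk at a time and handed to
 * dirty_and_release_pages, which marks them delalloc or stores small
 * writes inline.  O_SYNC writes are committed through the tree log when
 * possible, and O_DIRECT is emulated by writing back and then dropping
 * the affected page cache range.
 */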
852 static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
853 				size_t count, loff_t *ppos)
854 {
855 	loff_t pos;
856 	loff_t start_pos;
857 	ssize_t num_written = 0;
858 	ssize_t err = 0;
859 	int ret = 0;
860 	struct inode *inode = fdentry(file)->d_inode;
861 	struct btrfs_root *root = BTRFS_I(inode)->root;
862 	struct page **pages = NULL;
863 	int nrptrs;
864 	struct page *pinned[2];
865 	unsigned long first_index;
866 	unsigned long last_index;
867 
868 	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
869 		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
870 	pinned[0] = NULL;
871 	pinned[1] = NULL;
872 
873 	pos = *ppos;
874 	start_pos = pos;
875 
876 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
877 	current->backing_dev_info = inode->i_mapping->backing_dev_info;
878 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
879 	if (err)
880 		goto out_nolock;
881 	if (count == 0)
882 		goto out_nolock;
883 #ifdef REMOVE_SUID_PATH
884 	err = remove_suid(&file->f_path);
885 #else
886 # if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26)
887 	err = file_remove_suid(file);
888 # else
889 	err = remove_suid(fdentry(file));
890 # endif
891 #endif
892 	if (err)
893 		goto out_nolock;
894 	file_update_time(file);
895 
896 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		err = -ENOMEM;
		goto out_nolock;
	}
897 
898 	mutex_lock(&inode->i_mutex);
899 	first_index = pos >> PAGE_CACHE_SHIFT;
900 	last_index = (pos + count) >> PAGE_CACHE_SHIFT;
901 
902 	/*
903 	 * if this is a nodatasum mount, force summing off for the inode
904 	 * all the time.  That way a later mount with summing on won't
905 	 * get confused
906 	 */
907 	if (btrfs_test_opt(root, NODATASUM))
908 		btrfs_set_flag(inode, NODATASUM);
909 
910 	/*
911 	 * there are lots of better ways to do this, but this code
912 	 * makes sure the first and last page in the file range are
913 	 * up to date and ready for cow
914 	 */
915 	if ((pos & (PAGE_CACHE_SIZE - 1))) {
916 		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
917 		if (!PageUptodate(pinned[0])) {
918 			ret = btrfs_readpage(NULL, pinned[0]);
919 			BUG_ON(ret);
920 			wait_on_page_locked(pinned[0]);
921 		} else {
922 			unlock_page(pinned[0]);
923 		}
924 	}
925 	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
926 		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
927 		if (!PageUptodate(pinned[1])) {
928 			ret = btrfs_readpage(NULL, pinned[1]);
929 			BUG_ON(ret);
930 			wait_on_page_locked(pinned[1]);
931 		} else {
932 			unlock_page(pinned[1]);
933 		}
934 	}
935 
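	/*
	 * the main loop: check for free space, lock down a batch of up to
	 * nrptrs pages, copy the user data into them and hand them to
	 * dirty_and_release_pages
	 */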
936 	while(count > 0) {
937 		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
938 		size_t write_bytes = min(count, nrptrs *
939 					(size_t)PAGE_CACHE_SIZE -
940 					 offset);
941 		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
942 					PAGE_CACHE_SHIFT;
943 
944 		WARN_ON(num_pages > nrptrs);
945 		memset(pages, 0, num_pages * sizeof(struct page *));
946 
947 		ret = btrfs_check_free_space(root, write_bytes, 0);
948 		if (ret)
949 			goto out;
950 
951 		ret = prepare_pages(root, file, pages, num_pages,
952 				    pos, first_index, last_index,
953 				    write_bytes);
954 		if (ret)
955 			goto out;
956 
957 		ret = btrfs_copy_from_user(pos, num_pages,
958 					   write_bytes, pages, buf);
959 		if (ret) {
960 			btrfs_drop_pages(pages, num_pages);
961 			goto out;
962 		}
963 
964 		ret = dirty_and_release_pages(NULL, root, file, pages,
965 					      num_pages, pos, write_bytes);
966 		btrfs_drop_pages(pages, num_pages);
967 		if (ret)
968 			goto out;
969 
970 		buf += write_bytes;
971 		count -= write_bytes;
972 		pos += write_bytes;
973 		num_written += write_bytes;
974 
975 		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
976 		if (num_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
977 			btrfs_btree_balance_dirty(root, 1);
978 		btrfs_throttle(root);
979 		cond_resched();
980 	}
981 out:
982 	mutex_unlock(&inode->i_mutex);
983 
984 out_nolock:
985 	kfree(pages);
986 	if (pinned[0])
987 		page_cache_release(pinned[0]);
988 	if (pinned[1])
989 		page_cache_release(pinned[1]);
990 	*ppos = pos;
991 
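	/*
	 * for O_SYNC writes (or sync inodes), write back the dirty range
	 * and commit it through the tree log if we can; otherwise fall
	 * back to a full transaction commit
	 */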
992 	if (num_written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
993 		struct btrfs_trans_handle *trans;
994 
995 		err = btrfs_fdatawrite_range(inode->i_mapping, start_pos,
996 					     start_pos + num_written -1,
997 					     WB_SYNC_NONE);
998 		if (err < 0)
999 			num_written = err;
1000 
1001 		err = btrfs_wait_on_page_writeback_range(inode->i_mapping,
1002 				 start_pos, start_pos + num_written - 1);
1003 		if (err < 0)
1004 			num_written = err;
1005 
1006 		trans = btrfs_start_transaction(root, 1);
1007 		ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
1008 		if (ret == 0) {
1009 			btrfs_sync_log(trans, root);
1010 			btrfs_end_transaction(trans, root);
1011 		} else {
1012 			btrfs_commit_transaction(trans, root);
1013 		}
1014 	} else if (num_written > 0 && (file->f_flags & O_DIRECT)) {
1015 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
1016 		do_sync_file_range(file, start_pos,
1017 				      start_pos + num_written - 1,
1018 				      SYNC_FILE_RANGE_WRITE |
1019 				      SYNC_FILE_RANGE_WAIT_AFTER);
1020 #else
1021 		do_sync_mapping_range(inode->i_mapping, start_pos,
1022 				      start_pos + num_written - 1,
1023 				      SYNC_FILE_RANGE_WRITE |
1024 				      SYNC_FILE_RANGE_WAIT_AFTER);
1025 #endif
1026 		invalidate_mapping_pages(inode->i_mapping,
1027 		      start_pos >> PAGE_CACHE_SHIFT,
1028 		     (start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
1029 	}
1030 	current->backing_dev_info = NULL;
1031 	return num_written ? num_written : err;
1032 }
1033 
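/*
 * called when the last reference to the file is dropped.  Make sure any
 * transaction left running by the transaction ioctls is ended.
 */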
1034 int btrfs_release_file(struct inode * inode, struct file * filp)
1035 {
1036 	if (filp->private_data)
1037 		btrfs_ioctl_trans_end(filp);
1038 	return 0;
1039 }
1040 
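/*
 * fsync: if the inode hasn't changed since the last transaction commit
 * there is nothing to do.  Otherwise wait for data writeback and commit
 * the change, preferring the tree log over a full transaction commit.
 */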
1041 int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
1042 {
1043 	struct inode *inode = dentry->d_inode;
1044 	struct btrfs_root *root = BTRFS_I(inode)->root;
1045 	int ret = 0;
1046 	struct btrfs_trans_handle *trans;
1047 
1048 	/*
1049 	 * check the transaction that last modified this inode
1050 	 * and see if it's already been committed
1051 	 */
1052 	if (!BTRFS_I(inode)->last_trans)
1053 		goto out;
1054 
1055 	mutex_lock(&root->fs_info->trans_mutex);
1056 	if (BTRFS_I(inode)->last_trans <=
1057 	    root->fs_info->last_trans_committed) {
1058 		BTRFS_I(inode)->last_trans = 0;
1059 		mutex_unlock(&root->fs_info->trans_mutex);
1060 		goto out;
1061 	}
1062 	mutex_unlock(&root->fs_info->trans_mutex);
1063 
1064 	filemap_fdatawait(inode->i_mapping);
1065 
1066 	/*
1067 	 * ok, we haven't committed the transaction yet, let's do a commit
1068 	 */
1069 	if (file->private_data)
1070 		btrfs_ioctl_trans_end(file);
1071 
1072 	trans = btrfs_start_transaction(root, 1);
1073 	if (!trans) {
1074 		ret = -ENOMEM;
1075 		goto out;
1076 	}
1077 
1078 	ret = btrfs_log_dentry_safe(trans, root, file->f_dentry);
1079 	if (ret < 0)
1080 		goto out;
1081 	if (ret > 0) {
1082 		ret = btrfs_commit_transaction(trans, root);
1083 	} else {
1084 		btrfs_sync_log(trans, root);
1085 		ret = btrfs_end_transaction(trans, root);
1086 	}
1087 out:
1088 	return ret > 0 ? -EIO : ret;
1089 }
1090 
1091 static struct vm_operations_struct btrfs_file_vm_ops = {
1092 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
1093 	.nopage         = filemap_nopage,
1094 	.populate       = filemap_populate,
1095 #else
1096 	.fault		= filemap_fault,
1097 #endif
1098 	.page_mkwrite	= btrfs_page_mkwrite,
1099 };
1100 
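/*
 * mmap uses the generic fault paths, plus btrfs_page_mkwrite so pages
 * dirtied through a writable mapping go through the btrfs write setup.
 */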
1101 static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
1102 {
1103 	vma->vm_ops = &btrfs_file_vm_ops;
1104 	file_accessed(filp);
1105 	return 0;
1106 }
1107 
1108 struct file_operations btrfs_file_operations = {
1109 	.llseek		= generic_file_llseek,
1110 	.read		= do_sync_read,
1111 	.aio_read       = generic_file_aio_read,
1112 	.splice_read	= generic_file_splice_read,
1113 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1114 	.sendfile	= generic_file_sendfile,
1115 #endif
1116 	.write		= btrfs_file_write,
1117 	.mmap		= btrfs_file_mmap,
1118 	.open		= generic_file_open,
1119 	.release	= btrfs_release_file,
1120 	.fsync		= btrfs_sync_file,
1121 	.unlocked_ioctl	= btrfs_ioctl,
1122 #ifdef CONFIG_COMPAT
1123 	.compat_ioctl	= btrfs_ioctl,
1124 #endif
1125 };
1126