xref: /openbmc/linux/fs/btrfs/file.c (revision d1310b2e0cd98eb1348553e69b73827b436dca7b)
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 
19 #include <linux/fs.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/time.h>
23 #include <linux/init.h>
24 #include <linux/string.h>
25 #include <linux/smp_lock.h>
26 #include <linux/backing-dev.h>
27 #include <linux/mpage.h>
28 #include <linux/swap.h>
29 #include <linux/writeback.h>
30 #include <linux/statfs.h>
31 #include <linux/compat.h>
32 #include <linux/version.h>
33 #include "ctree.h"
34 #include "disk-io.h"
35 #include "transaction.h"
36 #include "btrfs_inode.h"
37 #include "ordered-data.h"
38 #include "ioctl.h"
39 #include "print-tree.h"
40 
41 
42 static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
43 				struct page **prepared_pages,
44 				const char __user * buf)
45 {
46 	long page_fault = 0;
47 	int i;
48 	int offset = pos & (PAGE_CACHE_SIZE - 1);
49 
50 	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
51 		size_t count = min_t(size_t,
52 				     PAGE_CACHE_SIZE - offset, write_bytes);
53 		struct page *page = prepared_pages[i];
54 		fault_in_pages_readable(buf, count);
55 
56 		/* Copy data from userspace to the current page */
57 		kmap(page);
58 		page_fault = __copy_from_user(page_address(page) + offset,
59 					      buf, count);
60 		/* Flush processor's dcache for this page */
61 		flush_dcache_page(page);
62 		kunmap(page);
63 		buf += count;
64 		write_bytes -= count;
65 
66 		if (page_fault)
67 			break;
68 	}
69 	return page_fault ? -EFAULT : 0;
70 }
71 
72 static void btrfs_drop_pages(struct page **pages, size_t num_pages)
73 {
74 	size_t i;
75 	for (i = 0; i < num_pages; i++) {
76 		if (!pages[i])
77 			break;
78 		unlock_page(pages[i]);
79 		mark_page_accessed(pages[i]);
80 		page_cache_release(pages[i]);
81 	}
82 }
83 
84 static int noinline insert_inline_extent(struct btrfs_trans_handle *trans,
85 				struct btrfs_root *root, struct inode *inode,
86 				u64 offset, size_t size,
87 				struct page **pages, size_t page_offset,
88 				int num_pages)
89 {
90 	struct btrfs_key key;
91 	struct btrfs_path *path;
92 	struct extent_buffer *leaf;
93 	char *kaddr;
94 	unsigned long ptr;
95 	struct btrfs_file_extent_item *ei;
96 	struct page *page;
97 	u32 datasize;
98 	int err = 0;
99 	int ret;
100 	int i;
101 	ssize_t cur_size;
102 
103 	path = btrfs_alloc_path();
104 	if (!path)
105 		return -ENOMEM;
106 
107 	btrfs_set_trans_block_group(trans, inode);
108 
109 	key.objectid = inode->i_ino;
110 	key.offset = offset;
111 	btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
112 
113 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
114 	if (ret < 0) {
115 		err = ret;
116 		goto fail;
117 	}
118 	if (ret == 1) {
119 		struct btrfs_key found_key;
120 
121 		if (path->slots[0] == 0)
122 			goto insert;
123 
124 		path->slots[0]--;
125 		leaf = path->nodes[0];
126 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
127 
128 		if (found_key.objectid != inode->i_ino)
129 			goto insert;
130 
131 		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
132 			goto insert;
133 		ei = btrfs_item_ptr(leaf, path->slots[0],
134 				    struct btrfs_file_extent_item);
135 
136 		if (btrfs_file_extent_type(leaf, ei) !=
137 		    BTRFS_FILE_EXTENT_INLINE) {
138 			goto insert;
139 		}
140 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
141 		ret = 0;
142 	}
143 	if (ret == 0) {
144 		u32 found_size;
145 		u64 found_end;
146 
147 		leaf = path->nodes[0];
148 		ei = btrfs_item_ptr(leaf, path->slots[0],
149 				    struct btrfs_file_extent_item);
150 
151 		if (btrfs_file_extent_type(leaf, ei) !=
152 		    BTRFS_FILE_EXTENT_INLINE) {
153 			err = ret;
154 			btrfs_print_leaf(root, leaf);
155 			printk("found wasn't inline offset %Lu inode %lu\n",
156 			       offset, inode->i_ino);
157 			goto fail;
158 		}
159 		found_size = btrfs_file_extent_inline_len(leaf,
160 					  btrfs_item_nr(leaf, path->slots[0]));
161 		found_end = key.offset + found_size;
162 
163 		if (found_end < offset + size) {
164 			btrfs_release_path(root, path);
165 			ret = btrfs_search_slot(trans, root, &key, path,
166 						offset + size - found_end, 1);
167 			BUG_ON(ret != 0);
168 
169 			ret = btrfs_extend_item(trans, root, path,
170 						offset + size - found_end);
171 			if (ret) {
172 				err = ret;
173 				goto fail;
174 			}
175 			leaf = path->nodes[0];
176 			ei = btrfs_item_ptr(leaf, path->slots[0],
177 					    struct btrfs_file_extent_item);
178 		}
179 		if (found_end < offset) {
180 			ptr = btrfs_file_extent_inline_start(ei) + found_size;
181 			memset_extent_buffer(leaf, 0, ptr, offset - found_end);
182 		}
183 	} else {
184 insert:
185 		btrfs_release_path(root, path);
186 		datasize = offset + size - key.offset;
187 		datasize = btrfs_file_extent_calc_inline_size(datasize);
188 		ret = btrfs_insert_empty_item(trans, root, path, &key,
189 					      datasize);
190 		if (ret) {
191 			err = ret;
192 			printk("got bad ret %d\n", ret);
193 			goto fail;
194 		}
195 		leaf = path->nodes[0];
196 		ei = btrfs_item_ptr(leaf, path->slots[0],
197 				    struct btrfs_file_extent_item);
198 		btrfs_set_file_extent_generation(leaf, ei, trans->transid);
199 		btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
200 	}
201 	ptr = btrfs_file_extent_inline_start(ei) + offset - key.offset;
202 
203 	cur_size = size;
204 	i = 0;
205 	while (size > 0) {
206 		page = pages[i];
207 		kaddr = kmap_atomic(page, KM_USER0);
208 		cur_size = min_t(size_t, PAGE_CACHE_SIZE - page_offset, size);
209 		write_extent_buffer(leaf, kaddr + page_offset, ptr, cur_size);
210 		kunmap_atomic(kaddr, KM_USER0);
211 		page_offset = 0;
212 		ptr += cur_size;
213 		size -= cur_size;
214 		if (i >= num_pages) {
215 			printk("i %d num_pages %d\n", i, num_pages);
216 		}
217 		i++;
218 	}
219 	btrfs_mark_buffer_dirty(leaf);
220 fail:
221 	btrfs_free_path(path);
222 	return err;
223 }
224 
/*
 * After user data has been copied into the prepared pages, hook the
 * pages up to the extent state: large writes are marked delalloc for
 * later allocation, small writes are copied directly into an inline
 * extent item.  Also fills any hole between the old i_size and the
 * start of this write, and pushes i_size forward when the write
 * extends the file.
 *
 * Runs under a fresh transaction with the written byte range locked in
 * the extent io_tree and fs_mutex held for the duration.
 */
static int noinline dirty_and_release_pages(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct file *file,
				   struct page **pages,
				   size_t num_pages,
				   loff_t pos,
				   size_t write_bytes)
{
	int err = 0;
	int i;
	struct inode *inode = fdentry(file)->d_inode;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	u64 hint_byte;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	u64 inline_size;
	loff_t isize = i_size_read(inode);

	/* round the written range out to sector boundaries */
	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;

	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		err = -ENOMEM;
		goto out_unlock;
	}
	btrfs_set_trans_block_group(trans, inode);
	inode->i_blocks += num_bytes >> 9;
	hint_byte = 0;

	/*
	 * NOTE(review): debug check only; 4095 assumes a 4K sectorsize —
	 * presumably this should be root->sectorsize - 1, confirm before
	 * changing
	 */
	if ((end_of_last_block & 4095) == 0) {
		printk("strange end of last %Lu %zu %Lu\n", start_pos, write_bytes, end_of_last_block);
	}
	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* FIXME...EIEIO, ENOSPC and more */

	/* insert any holes we need to create */
	if (inode->i_size < start_pos) {
		u64 last_pos_in_file;
		u64 hole_size;
		u64 mask = root->sectorsize - 1;
		last_pos_in_file = (isize + mask) & ~mask;
		hole_size = (end_pos - last_pos_in_file + mask) & ~mask;

		if (last_pos_in_file < start_pos) {
			/*
			 * drop anything overlapping the hole, then record
			 * it as an extent with disk_bytenr 0
			 */
			err = btrfs_drop_extents(trans, root, inode,
						 last_pos_in_file,
						 last_pos_in_file + hole_size,
						 last_pos_in_file,
						 &hint_byte);
			if (err)
				goto failed;

			err = btrfs_insert_file_extent(trans, root,
						       inode->i_ino,
						       last_pos_in_file,
						       0, 0, hole_size);
			btrfs_drop_extent_cache(inode, last_pos_in_file,
					last_pos_in_file + hole_size -1);
			btrfs_check_file(root, inode);
		}
		if (err)
			goto failed;
	}

	/*
	 * either allocate an extent for the new bytes or setup the key
	 * to show we are doing inline data in the extent
	 */
	inline_size = end_pos;
	if (isize >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
	    inline_size > 8192 ||
	    inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
		u64 last_end;
		u64 existing_delalloc = 0;

		/* too big for inline: dirty the pages and mark delalloc */
		for (i = 0; i < num_pages; i++) {
			struct page *p = pages[i];
			SetPageUptodate(p);
			set_page_dirty(p);
		}
		last_end = (u64)(pages[num_pages -1]->index) <<
				PAGE_CACHE_SHIFT;
		last_end += PAGE_CACHE_SIZE - 1;
		if (start_pos < isize) {
			u64 delalloc_start = start_pos;
			/*
			 * bytes already delalloc in this range must not be
			 * double counted in delalloc_bytes below
			 */
			existing_delalloc = count_range_bits(io_tree,
					     &delalloc_start,
					     end_of_last_block, (u64)-1,
					     EXTENT_DELALLOC);
		}
		set_extent_delalloc(io_tree, start_pos, end_of_last_block,
				 GFP_NOFS);
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->delalloc_bytes += (end_of_last_block + 1 -
					  start_pos) - existing_delalloc;
		spin_unlock(&root->fs_info->delalloc_lock);
		btrfs_add_ordered_inode(inode);
	} else {
		u64 aligned_end;
		/* step one, delete the existing extents in this range */
		aligned_end = (pos + write_bytes + root->sectorsize - 1) &
			~((u64)root->sectorsize - 1);
		err = btrfs_drop_extents(trans, root, inode, start_pos,
					 aligned_end, aligned_end, &hint_byte);
		if (err)
			goto failed;
		if (isize > inline_size)
			inline_size = min_t(u64, isize, aligned_end);
		inline_size -= start_pos;
		/* step two, copy the page contents into the inline item */
		err = insert_inline_extent(trans, root, inode, start_pos,
					   inline_size, pages, 0, num_pages);
		btrfs_drop_extent_cache(inode, start_pos, aligned_end - 1);
		BUG_ON(err);
	}
	if (end_pos > isize) {
		i_size_write(inode, end_pos);
		btrfs_update_inode(trans, root, inode);
	}
failed:
	/*
	 * NOTE(review): this overwrites any earlier err with the result of
	 * btrfs_end_transaction — earlier failures are lost; confirm this
	 * is intentional
	 */
	err = btrfs_end_transaction(trans, root);
out_unlock:
	mutex_unlock(&root->fs_info->fs_mutex);
	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	return err;
}
359 
360 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end)
361 {
362 	struct extent_map *em;
363 	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
364 
365 	while(1) {
366 		spin_lock(&em_tree->lock);
367 		em = lookup_extent_mapping(em_tree, start, end);
368 		if (!em) {
369 			spin_unlock(&em_tree->lock);
370 			break;
371 		}
372 		remove_extent_mapping(em_tree, em);
373 		spin_unlock(&em_tree->lock);
374 
375 		/* once for us */
376 		free_extent_map(em);
377 		/* once for the tree*/
378 		free_extent_map(em);
379 	}
380 	return 0;
381 }
382 
/*
 * Debugging helper that walks the file's extent items checking they are
 * contiguous and cover i_size.  The checker is currently compiled out
 * (#if 0) so this always returns 0.
 */
int btrfs_check_file(struct btrfs_root *root, struct inode *inode)
{
	return 0;
#if 0
	struct btrfs_path *path;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	u64 last_offset = 0;
	int nritems;
	int slot;
	int found_type;
	int ret;
	int err = 0;
	u64 extent_end = 0;

	path = btrfs_alloc_path();
	ret = btrfs_lookup_file_extent(NULL, root, path, inode->i_ino,
				       last_offset, 0);
	while(1) {
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret)
				goto out;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != inode->i_ino)
			break;
		if (found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto out;

		/* each extent must start exactly where the last one ended */
		if (found_key.offset != last_offset) {
			WARN_ON(1);
			btrfs_print_leaf(root, leaf);
			printk("inode %lu found offset %Lu expected %Lu\n",
			       inode->i_ino, found_key.offset, last_offset);
			err = 1;
			goto out;
		}
		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(leaf, extent);
		if (found_type == BTRFS_FILE_EXTENT_REG) {
			extent_end = found_key.offset +
			     btrfs_file_extent_num_bytes(leaf, extent);
		} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
			struct btrfs_item *item;
			item = btrfs_item_nr(leaf, slot);
			extent_end = found_key.offset +
			     btrfs_file_extent_inline_len(leaf, item);
			extent_end = (extent_end + root->sectorsize - 1) &
				~((u64)root->sectorsize -1 );
		}
		last_offset = extent_end;
		path->slots[0]++;
	}
	/* the extents must also reach all the way to i_size */
	if (last_offset < inode->i_size) {
		WARN_ON(1);
		btrfs_print_leaf(root, leaf);
		printk("inode %lu found offset %Lu size %Lu\n", inode->i_ino,
		       last_offset, inode->i_size);
		err = 1;

	}
out:
	btrfs_free_path(path);
	return err;
#endif
}
456 
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 *
 * inline_limit bounds how far an inline extent may be truncated.
 * Returns 0 on success or a negative errno; caller holds the
 * transaction.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode,
		       u64 start, u64 end, u64 inline_limit, u64 *hint_byte)
{
	u64 extent_end = 0;
	u64 search_start = start;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *extent;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item old;
	int keep;
	int slot;
	int bookend;
	int found_type;
	int found_extent;
	int found_inline;
	int recow;
	int ret;

	/* the cached mappings are about to go stale */
	btrfs_drop_extent_cache(inode, start, end - 1);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while(1) {
		recow = 0;
		btrfs_release_path(root, path);
		ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
					       search_start, -1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			/* no exact match, back up to the previous item */
			if (path->slots[0] == 0) {
				ret = 0;
				goto out;
			}
			path->slots[0]--;
		}
next_slot:
		keep = 0;
		bookend = 0;
		found_extent = 0;
		found_inline = 0;
		extent = NULL;
		leaf = path->nodes[0];
		slot = path->slots[0];
		ret = 0;
		btrfs_item_key_to_cpu(leaf, &key, slot);

		/* walked past the range or off this inode entirely */
		if (key.offset >= end || key.objectid != inode->i_ino) {
			goto out;
		}
		if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY) {
			goto out;
		}
		if (recow) {
			/* leaf changed under us, redo the search from here */
			search_start = key.offset;
			continue;
		}
		if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
			extent = btrfs_item_ptr(leaf, slot,
						struct btrfs_file_extent_item);
			found_type = btrfs_file_extent_type(leaf, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG) {
				extent_end =
				     btrfs_file_extent_disk_bytenr(leaf,
								   extent);
				if (extent_end)
					*hint_byte = extent_end;

				extent_end = key.offset +
				     btrfs_file_extent_num_bytes(leaf, extent);
				found_extent = 1;
			} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
				struct btrfs_item *item;
				item = btrfs_item_nr(leaf, slot);
				found_inline = 1;
				extent_end = key.offset +
				     btrfs_file_extent_inline_len(leaf, item);
			}
		} else {
			extent_end = search_start;
		}

		/* we found nothing we can drop */
		if ((!found_extent && !found_inline) ||
		    search_start >= extent_end) {
			int nextret;
			u32 nritems;
			nritems = btrfs_header_nritems(leaf);
			if (slot >= nritems - 1) {
				nextret = btrfs_next_leaf(root, path);
				if (nextret)
					goto out;
				recow = 1;
			} else {
				path->slots[0]++;
			}
			goto next_slot;
		}

		/* inline extents only end on sector boundaries */
		if (found_inline) {
			u64 mask = root->sectorsize - 1;
			search_start = (extent_end + mask) & ~mask;
		} else
			search_start = extent_end;
		if (end <= extent_end && start >= key.offset && found_inline) {
			*hint_byte = EXTENT_MAP_INLINE;
			continue;
		}
		if (end < extent_end && end >= key.offset) {
			/*
			 * the extent sticks out past end: a bookend (tail)
			 * item will be created below, so take an extra ref
			 * on the disk extent and stash a copy of the item
			 */
			if (found_extent) {
				u64 disk_bytenr =
				    btrfs_file_extent_disk_bytenr(leaf, extent);
				u64 disk_num_bytes =
				    btrfs_file_extent_disk_num_bytes(leaf,
								      extent);
				read_extent_buffer(leaf, &old,
						   (unsigned long)extent,
						   sizeof(old));
				if (disk_bytenr != 0) {
					ret = btrfs_inc_extent_ref(trans, root,
					         disk_bytenr, disk_num_bytes,
						 root->root_key.objectid,
						 trans->transid,
						 key.objectid, end);
					BUG_ON(ret);
				}
			}
			bookend = 1;
			if (found_inline && start <= key.offset &&
			    inline_limit < extent_end)
				keep = 1;
		}
		/* truncate existing extent */
		if (start > key.offset) {
			u64 new_num;
			u64 old_num;
			keep = 1;
			WARN_ON(start & (root->sectorsize - 1));
			if (found_extent) {
				new_num = start - key.offset;
				old_num = btrfs_file_extent_num_bytes(leaf,
								      extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
				if (btrfs_file_extent_disk_bytenr(leaf,
								  extent)) {
					inode->i_blocks -=
						(old_num - new_num) >> 9;
				}
				btrfs_set_file_extent_num_bytes(leaf, extent,
								new_num);
				btrfs_mark_buffer_dirty(leaf);
			} else if (key.offset < inline_limit &&
				   (end > extent_end) &&
				   (inline_limit < extent_end)) {
				u32 new_size;
				new_size = btrfs_file_extent_calc_inline_size(
						   inline_limit - key.offset);
				btrfs_truncate_item(trans, root, path,
						    new_size, 1);
			}
		}
		/* delete the entire extent */
		if (!keep) {
			u64 disk_bytenr = 0;
			u64 disk_num_bytes = 0;
			u64 extent_num_bytes = 0;
			u64 root_gen;
			u64 root_owner;

			root_gen = btrfs_header_generation(leaf);
			root_owner = btrfs_header_owner(leaf);
			if (found_extent) {
				disk_bytenr =
				      btrfs_file_extent_disk_bytenr(leaf,
								     extent);
				disk_num_bytes =
				      btrfs_file_extent_disk_num_bytes(leaf,
								       extent);
				extent_num_bytes =
				      btrfs_file_extent_num_bytes(leaf, extent);
				*hint_byte =
					btrfs_file_extent_disk_bytenr(leaf,
								      extent);
			}
			ret = btrfs_del_item(trans, root, path);
			/* TODO update progress marker and return */
			BUG_ON(ret);
			btrfs_release_path(root, path);
			extent = NULL;
			/* drop the on-disk reference we just unlinked */
			if (found_extent && disk_bytenr != 0) {
				inode->i_blocks -= extent_num_bytes >> 9;
				ret = btrfs_free_extent(trans, root,
						disk_bytenr,
						disk_num_bytes,
						root_owner,
						root_gen, inode->i_ino,
						key.offset, 0);
			}

			BUG_ON(ret);
			if (!bookend && search_start >= end) {
				ret = 0;
				goto out;
			}
			if (!bookend)
				continue;
		}
		/* shrink an inline extent down to inline_limit */
		if (bookend && found_inline && start <= key.offset &&
		    inline_limit < extent_end && key.offset <= inline_limit) {
			u32 new_size;
			new_size = btrfs_file_extent_calc_inline_size(
						   extent_end - inline_limit);
			btrfs_truncate_item(trans, root, path, new_size, 0);
		}
		/* create bookend, splitting the extent in two */
		if (bookend && found_extent) {
			struct btrfs_key ins;
			ins.objectid = inode->i_ino;
			ins.offset = end;
			btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
			btrfs_release_path(root, path);
			ret = btrfs_insert_empty_item(trans, root, path, &ins,
						      sizeof(*extent));

			leaf = path->nodes[0];
			if (ret) {
				btrfs_print_leaf(root, leaf);
				printk("got %d on inserting %Lu %u %Lu start %Lu end %Lu found %Lu %Lu keep was %d\n", ret , ins.objectid, ins.type, ins.offset, start, end, key.offset, extent_end, keep);
			}
			BUG_ON(ret);
			extent = btrfs_item_ptr(leaf, path->slots[0],
						struct btrfs_file_extent_item);
			/* start from the copy of the original item ... */
			write_extent_buffer(leaf, &old,
					    (unsigned long)extent, sizeof(old));

			/* ... then shift it so it describes [end, extent_end) */
			btrfs_set_file_extent_offset(leaf, extent,
				    le64_to_cpu(old.offset) + end - key.offset);
			WARN_ON(le64_to_cpu(old.num_bytes) <
				(extent_end - end));
			btrfs_set_file_extent_num_bytes(leaf, extent,
							extent_end - end);
			btrfs_set_file_extent_type(leaf, extent,
						   BTRFS_FILE_EXTENT_REG);

			btrfs_mark_buffer_dirty(path->nodes[0]);
			if (le64_to_cpu(old.disk_bytenr) != 0) {
				inode->i_blocks +=
				      btrfs_file_extent_num_bytes(leaf,
								  extent) >> 9;
			}
			ret = 0;
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
729 
730 /*
731  * this gets pages into the page cache and locks them down
732  */
733 static int prepare_pages(struct btrfs_root *root, struct file *file,
734 			 struct page **pages, size_t num_pages,
735 			 loff_t pos, unsigned long first_index,
736 			 unsigned long last_index, size_t write_bytes)
737 {
738 	int i;
739 	unsigned long index = pos >> PAGE_CACHE_SHIFT;
740 	struct inode *inode = fdentry(file)->d_inode;
741 	int err = 0;
742 	u64 start_pos;
743 
744 	start_pos = pos & ~((u64)root->sectorsize - 1);
745 
746 	memset(pages, 0, num_pages * sizeof(struct page *));
747 
748 	for (i = 0; i < num_pages; i++) {
749 		pages[i] = grab_cache_page(inode->i_mapping, index + i);
750 		if (!pages[i]) {
751 			err = -ENOMEM;
752 			BUG_ON(1);
753 		}
754 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
755 		ClearPageDirty(pages[i]);
756 #else
757 		cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
758 #endif
759 		wait_on_page_writeback(pages[i]);
760 		set_page_extent_mapped(pages[i]);
761 		WARN_ON(!PageLocked(pages[i]));
762 	}
763 	return 0;
764 }
765 
766 static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
767 				size_t count, loff_t *ppos)
768 {
769 	loff_t pos;
770 	loff_t start_pos;
771 	ssize_t num_written = 0;
772 	ssize_t err = 0;
773 	int ret = 0;
774 	struct inode *inode = fdentry(file)->d_inode;
775 	struct btrfs_root *root = BTRFS_I(inode)->root;
776 	struct page **pages = NULL;
777 	int nrptrs;
778 	struct page *pinned[2];
779 	unsigned long first_index;
780 	unsigned long last_index;
781 
782 	nrptrs = min((count + PAGE_CACHE_SIZE - 1) / PAGE_CACHE_SIZE,
783 		     PAGE_CACHE_SIZE / (sizeof(struct page *)));
784 	pinned[0] = NULL;
785 	pinned[1] = NULL;
786 	if (file->f_flags & O_DIRECT)
787 		return -EINVAL;
788 
789 	pos = *ppos;
790 	start_pos = pos;
791 
792 	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
793 	current->backing_dev_info = inode->i_mapping->backing_dev_info;
794 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
795 	if (err)
796 		goto out_nolock;
797 	if (count == 0)
798 		goto out_nolock;
799 	err = remove_suid(fdentry(file));
800 	if (err)
801 		goto out_nolock;
802 	file_update_time(file);
803 
804 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
805 
806 	mutex_lock(&inode->i_mutex);
807 	first_index = pos >> PAGE_CACHE_SHIFT;
808 	last_index = (pos + count) >> PAGE_CACHE_SHIFT;
809 
810 	/*
811 	 * there are lots of better ways to do this, but this code
812 	 * makes sure the first and last page in the file range are
813 	 * up to date and ready for cow
814 	 */
815 	if ((pos & (PAGE_CACHE_SIZE - 1))) {
816 		pinned[0] = grab_cache_page(inode->i_mapping, first_index);
817 		if (!PageUptodate(pinned[0])) {
818 			ret = btrfs_readpage(NULL, pinned[0]);
819 			BUG_ON(ret);
820 			wait_on_page_locked(pinned[0]);
821 		} else {
822 			unlock_page(pinned[0]);
823 		}
824 	}
825 	if ((pos + count) & (PAGE_CACHE_SIZE - 1)) {
826 		pinned[1] = grab_cache_page(inode->i_mapping, last_index);
827 		if (!PageUptodate(pinned[1])) {
828 			ret = btrfs_readpage(NULL, pinned[1]);
829 			BUG_ON(ret);
830 			wait_on_page_locked(pinned[1]);
831 		} else {
832 			unlock_page(pinned[1]);
833 		}
834 	}
835 
836 	while(count > 0) {
837 		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
838 		size_t write_bytes = min(count, nrptrs *
839 					(size_t)PAGE_CACHE_SIZE -
840 					 offset);
841 		size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
842 					PAGE_CACHE_SHIFT;
843 
844 		WARN_ON(num_pages > nrptrs);
845 		memset(pages, 0, sizeof(pages));
846 
847 		mutex_lock(&root->fs_info->fs_mutex);
848 		ret = btrfs_check_free_space(root, write_bytes, 0);
849 		mutex_unlock(&root->fs_info->fs_mutex);
850 		if (ret)
851 			goto out;
852 
853 		ret = prepare_pages(root, file, pages, num_pages,
854 				    pos, first_index, last_index,
855 				    write_bytes);
856 		if (ret)
857 			goto out;
858 
859 		ret = btrfs_copy_from_user(pos, num_pages,
860 					   write_bytes, pages, buf);
861 		if (ret) {
862 			btrfs_drop_pages(pages, num_pages);
863 			goto out;
864 		}
865 
866 		ret = dirty_and_release_pages(NULL, root, file, pages,
867 					      num_pages, pos, write_bytes);
868 		btrfs_drop_pages(pages, num_pages);
869 		if (ret)
870 			goto out;
871 
872 		buf += write_bytes;
873 		count -= write_bytes;
874 		pos += write_bytes;
875 		num_written += write_bytes;
876 
877 		balance_dirty_pages_ratelimited_nr(inode->i_mapping, num_pages);
878 		if (num_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
879 			btrfs_btree_balance_dirty(root, 1);
880 		btrfs_throttle(root);
881 		cond_resched();
882 	}
883 out:
884 	mutex_unlock(&inode->i_mutex);
885 
886 out_nolock:
887 	kfree(pages);
888 	if (pinned[0])
889 		page_cache_release(pinned[0]);
890 	if (pinned[1])
891 		page_cache_release(pinned[1]);
892 	*ppos = pos;
893 
894 	if (num_written > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
895 		err = sync_page_range(inode, inode->i_mapping,
896 				      start_pos, num_written);
897 		if (err < 0)
898 			num_written = err;
899 	}
900 	current->backing_dev_info = NULL;
901 	return num_written ? num_written : err;
902 }
903 
904 static int btrfs_sync_file(struct file *file,
905 			   struct dentry *dentry, int datasync)
906 {
907 	struct inode *inode = dentry->d_inode;
908 	struct btrfs_root *root = BTRFS_I(inode)->root;
909 	int ret = 0;
910 	struct btrfs_trans_handle *trans;
911 
912 	/*
913 	 * check the transaction that last modified this inode
914 	 * and see if its already been committed
915 	 */
916 	mutex_lock(&root->fs_info->fs_mutex);
917 	if (!BTRFS_I(inode)->last_trans)
918 		goto out;
919 	mutex_lock(&root->fs_info->trans_mutex);
920 	if (BTRFS_I(inode)->last_trans <=
921 	    root->fs_info->last_trans_committed) {
922 		BTRFS_I(inode)->last_trans = 0;
923 		mutex_unlock(&root->fs_info->trans_mutex);
924 		goto out;
925 	}
926 	mutex_unlock(&root->fs_info->trans_mutex);
927 
928 	/*
929 	 * ok we haven't committed the transaction yet, lets do a commit
930 	 */
931 	trans = btrfs_start_transaction(root, 1);
932 	if (!trans) {
933 		ret = -ENOMEM;
934 		goto out;
935 	}
936 	ret = btrfs_commit_transaction(trans, root);
937 out:
938 	mutex_unlock(&root->fs_info->fs_mutex);
939 	return ret > 0 ? EIO : ret;
940 }
941 
/*
 * vma operations for mmap'd btrfs files: generic filemap fault paths
 * plus a btrfs page_mkwrite hook so writes through the mapping go
 * through btrfs bookkeeping.  The #if selects the pre-2.6.23 nopage
 * interface versus the newer ->fault interface.
 */
static struct vm_operations_struct btrfs_file_vm_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
	.nopage         = filemap_nopage,
	.populate       = filemap_populate,
#else
	.fault		= filemap_fault,
#endif
	.page_mkwrite	= btrfs_page_mkwrite,
};
951 
952 static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
953 {
954 	vma->vm_ops = &btrfs_file_vm_ops;
955 	file_accessed(filp);
956 	return 0;
957 }
958 
/*
 * file_operations for regular btrfs files.  Reads go through the
 * generic page cache paths; writes use btrfs_file_write above.  The
 * version #ifs keep the table buildable on pre-2.6.18 kernels
 * (sendfile) and the compat_ioctl entry reuses btrfs_ioctl directly.
 */
struct file_operations btrfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read       = generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	.sendfile	= generic_file_sendfile,
#endif
	.write		= btrfs_file_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.fsync		= btrfs_sync_file,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};
976 
977