xref: /openbmc/linux/fs/btrfs/file.c (revision f845af67)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/time.h>
9 #include <linux/init.h>
10 #include <linux/string.h>
11 #include <linux/backing-dev.h>
12 #include <linux/falloc.h>
13 #include <linux/writeback.h>
14 #include <linux/compat.h>
15 #include <linux/slab.h>
16 #include <linux/btrfs.h>
17 #include <linux/uio.h>
18 #include <linux/iversion.h>
19 #include <linux/fsverity.h>
20 #include "ctree.h"
21 #include "disk-io.h"
22 #include "transaction.h"
23 #include "btrfs_inode.h"
24 #include "print-tree.h"
25 #include "tree-log.h"
26 #include "locking.h"
27 #include "volumes.h"
28 #include "qgroup.h"
29 #include "compression.h"
30 #include "delalloc-space.h"
31 #include "reflink.h"
32 #include "subpage.h"
33 #include "fs.h"
34 #include "accessors.h"
35 #include "extent-tree.h"
36 #include "file-item.h"
37 #include "ioctl.h"
38 #include "file.h"
39 #include "super.h"
40 
41 /* simple helper to fault in pages and copy.  This should go away
42  * and be replaced with calls into generic code.
43  */
44 static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
45 					 struct page **prepared_pages,
46 					 struct iov_iter *i)
47 {
48 	size_t copied = 0;
49 	size_t total_copied = 0;
50 	int pg = 0;
51 	int offset = offset_in_page(pos);
52 
53 	while (write_bytes > 0) {
54 		size_t count = min_t(size_t,
55 				     PAGE_SIZE - offset, write_bytes);
56 		struct page *page = prepared_pages[pg];
57 		/*
58 		 * Copy data from userspace to the current page
59 		 */
60 		copied = copy_page_from_iter_atomic(page, offset, count, i);
61 
62 		/* Flush processor's dcache for this page */
63 		flush_dcache_page(page);
64 
65 		/*
66 		 * if we get a partial write, we can end up with
67 		 * partially up to date pages.  These add
68 		 * a lot of complexity, so make sure they don't
69 		 * happen by forcing this copy to be retried.
70 		 *
71 		 * The rest of the btrfs_file_write code will fall
72 		 * back to page at a time copies after we return 0.
73 		 */
74 		if (unlikely(copied < count)) {
75 			if (!PageUptodate(page)) {
76 				iov_iter_revert(i, copied);
77 				copied = 0;
78 			}
79 			if (!copied)
80 				break;
81 		}
82 
83 		write_bytes -= copied;
84 		total_copied += copied;
85 		offset += copied;
86 		if (offset == PAGE_SIZE) {
87 			pg++;
88 			offset = 0;
89 		}
90 	}
91 	return total_copied;
92 }
93 
94 /*
95  * unlocks pages after btrfs_file_write is done with them
96  */
97 static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
98 			     struct page **pages, size_t num_pages,
99 			     u64 pos, u64 copied)
100 {
101 	size_t i;
102 	u64 block_start = round_down(pos, fs_info->sectorsize);
103 	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
104 
105 	ASSERT(block_len <= U32_MAX);
106 	for (i = 0; i < num_pages; i++) {
107 		/* page checked is some magic around finding pages that
108 		 * have been modified without going through btrfs_set_page_dirty
109 		 * clear it here. There should be no need to mark the pages
110 		 * accessed as prepare_pages should have marked them accessed
111 		 * in prepare_pages via find_or_create_page()
112 		 */
113 		btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
114 					       block_len);
115 		unlock_page(pages[i]);
116 		put_page(pages[i]);
117 	}
118 }
119 
120 /*
121  * After btrfs_copy_from_user(), update the following things for delalloc:
122  * - Mark newly dirtied pages as DELALLOC in the io tree.
123  *   Used to advise which range is to be written back.
124  * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
125  * - Update inode size for past EOF write
126  */
127 int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
128 		      size_t num_pages, loff_t pos, size_t write_bytes,
129 		      struct extent_state **cached, bool noreserve)
130 {
131 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
132 	int err = 0;
133 	int i;
134 	u64 num_bytes;
135 	u64 start_pos;
136 	u64 end_of_last_block;
137 	u64 end_pos = pos + write_bytes;
138 	loff_t isize = i_size_read(&inode->vfs_inode);
139 	unsigned int extra_bits = 0;
140 
141 	if (write_bytes == 0)
142 		return 0;
143 
144 	if (noreserve)
145 		extra_bits |= EXTENT_NORESERVE;
146 
147 	start_pos = round_down(pos, fs_info->sectorsize);
148 	num_bytes = round_up(write_bytes + pos - start_pos,
149 			     fs_info->sectorsize);
150 	ASSERT(num_bytes <= U32_MAX);
151 
152 	end_of_last_block = start_pos + num_bytes - 1;
153 
154 	/*
155 	 * The pages may have already been dirty, clear out old accounting so
156 	 * we can set things up properly
157 	 */
158 	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
159 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
160 			 cached);
161 
162 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
163 					extra_bits, cached);
164 	if (err)
165 		return err;
166 
167 	for (i = 0; i < num_pages; i++) {
168 		struct page *p = pages[i];
169 
170 		btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
171 		btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
172 		btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
173 	}
174 
175 	/*
176 	 * we've only changed i_size in ram, and we haven't updated
177 	 * the disk i_size.  There is no need to log the inode
178 	 * at this time.
179 	 */
180 	if (end_pos > isize)
181 		i_size_write(&inode->vfs_inode, end_pos);
182 	return 0;
183 }
184 
185 /*
186  * this is very complex, but the basic idea is to drop all extents
187  * in the range start - end.  hint_block is filled in with a block number
188  * that would be a good hint to the block allocator for this file.
189  *
190  * If an extent intersects the range but is not entirely inside the range
191  * it is either truncated or split.  Anything entirely inside the range
192  * is deleted from the tree.
193  *
194  * Note: the VFS' inode number of bytes is not updated, it's up to the caller
195  * to deal with that. We set the field 'bytes_found' of the arguments structure
196  * with the number of allocated bytes found in the target range, so that the
197  * caller can update the inode's number of bytes in an atomic way when
198  * replacing extents in a range to avoid races with stat(2).
199  */
200 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
201 		       struct btrfs_root *root, struct btrfs_inode *inode,
202 		       struct btrfs_drop_extents_args *args)
203 {
204 	struct btrfs_fs_info *fs_info = root->fs_info;
205 	struct extent_buffer *leaf;
206 	struct btrfs_file_extent_item *fi;
207 	struct btrfs_ref ref = { 0 };
208 	struct btrfs_key key;
209 	struct btrfs_key new_key;
210 	u64 ino = btrfs_ino(inode);
211 	u64 search_start = args->start;
212 	u64 disk_bytenr = 0;
213 	u64 num_bytes = 0;
214 	u64 extent_offset = 0;
215 	u64 extent_end = 0;
216 	u64 last_end = args->start;
217 	int del_nr = 0;
218 	int del_slot = 0;
219 	int extent_type;
220 	int recow;
221 	int ret;
222 	int modify_tree = -1;
223 	int update_refs;
224 	int found = 0;
225 	struct btrfs_path *path = args->path;
226 
227 	args->bytes_found = 0;
228 	args->extent_inserted = false;
229 
230 	/* Must always have a path if ->replace_extent is true */
231 	ASSERT(!(args->replace_extent && !args->path));
232 
233 	if (!path) {
234 		path = btrfs_alloc_path();
235 		if (!path) {
236 			ret = -ENOMEM;
237 			goto out;
238 		}
239 	}
240 
241 	if (args->drop_cache)
242 		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
243 
244 	if (args->start >= inode->disk_i_size && !args->replace_extent)
245 		modify_tree = 0;
246 
247 	update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
248 	while (1) {
249 		recow = 0;
250 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
251 					       search_start, modify_tree);
252 		if (ret < 0)
253 			break;
254 		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
255 			leaf = path->nodes[0];
256 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
257 			if (key.objectid == ino &&
258 			    key.type == BTRFS_EXTENT_DATA_KEY)
259 				path->slots[0]--;
260 		}
261 		ret = 0;
262 next_slot:
263 		leaf = path->nodes[0];
264 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
265 			BUG_ON(del_nr > 0);
266 			ret = btrfs_next_leaf(root, path);
267 			if (ret < 0)
268 				break;
269 			if (ret > 0) {
270 				ret = 0;
271 				break;
272 			}
273 			leaf = path->nodes[0];
274 			recow = 1;
275 		}
276 
277 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
278 
279 		if (key.objectid > ino)
280 			break;
281 		if (WARN_ON_ONCE(key.objectid < ino) ||
282 		    key.type < BTRFS_EXTENT_DATA_KEY) {
283 			ASSERT(del_nr == 0);
284 			path->slots[0]++;
285 			goto next_slot;
286 		}
287 		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
288 			break;
289 
290 		fi = btrfs_item_ptr(leaf, path->slots[0],
291 				    struct btrfs_file_extent_item);
292 		extent_type = btrfs_file_extent_type(leaf, fi);
293 
294 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
295 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
296 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
297 			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
298 			extent_offset = btrfs_file_extent_offset(leaf, fi);
299 			extent_end = key.offset +
300 				btrfs_file_extent_num_bytes(leaf, fi);
301 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
302 			extent_end = key.offset +
303 				btrfs_file_extent_ram_bytes(leaf, fi);
304 		} else {
305 			/* can't happen */
306 			BUG();
307 		}
308 
309 		/*
310 		 * Don't skip extent items representing 0 byte lengths. They
311 		 * used to be created (bug) if while punching holes we hit
312 		 * -ENOSPC condition. So if we find one here, just ensure we
313 		 * delete it, otherwise we would insert a new file extent item
314 		 * with the same key (offset) as that 0 bytes length file
315 		 * extent item in the call to setup_items_for_insert() later
316 		 * in this function.
317 		 */
318 		if (extent_end == key.offset && extent_end >= search_start) {
319 			last_end = extent_end;
320 			goto delete_extent_item;
321 		}
322 
323 		if (extent_end <= search_start) {
324 			path->slots[0]++;
325 			goto next_slot;
326 		}
327 
328 		found = 1;
329 		search_start = max(key.offset, args->start);
330 		if (recow || !modify_tree) {
331 			modify_tree = -1;
332 			btrfs_release_path(path);
333 			continue;
334 		}
335 
336 		/*
337 		 *     | - range to drop - |
338 		 *  | -------- extent -------- |
339 		 */
340 		if (args->start > key.offset && args->end < extent_end) {
341 			BUG_ON(del_nr > 0);
342 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
343 				ret = -EOPNOTSUPP;
344 				break;
345 			}
346 
347 			memcpy(&new_key, &key, sizeof(new_key));
348 			new_key.offset = args->start;
349 			ret = btrfs_duplicate_item(trans, root, path,
350 						   &new_key);
351 			if (ret == -EAGAIN) {
352 				btrfs_release_path(path);
353 				continue;
354 			}
355 			if (ret < 0)
356 				break;
357 
358 			leaf = path->nodes[0];
359 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
360 					    struct btrfs_file_extent_item);
361 			btrfs_set_file_extent_num_bytes(leaf, fi,
362 							args->start - key.offset);
363 
364 			fi = btrfs_item_ptr(leaf, path->slots[0],
365 					    struct btrfs_file_extent_item);
366 
367 			extent_offset += args->start - key.offset;
368 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
369 			btrfs_set_file_extent_num_bytes(leaf, fi,
370 							extent_end - args->start);
371 			btrfs_mark_buffer_dirty(trans, leaf);
372 
373 			if (update_refs && disk_bytenr > 0) {
374 				btrfs_init_generic_ref(&ref,
375 						BTRFS_ADD_DELAYED_REF,
376 						disk_bytenr, num_bytes, 0);
377 				btrfs_init_data_ref(&ref,
378 						root->root_key.objectid,
379 						new_key.objectid,
380 						args->start - extent_offset,
381 						0, false);
382 				ret = btrfs_inc_extent_ref(trans, &ref);
383 				if (ret) {
384 					btrfs_abort_transaction(trans, ret);
385 					break;
386 				}
387 			}
388 			key.offset = args->start;
389 		}
390 		/*
391 		 * From here on out we will have actually dropped something, so
392 		 * last_end can be updated.
393 		 */
394 		last_end = extent_end;
395 
396 		/*
397 		 *  | ---- range to drop ----- |
398 		 *      | -------- extent -------- |
399 		 */
400 		if (args->start <= key.offset && args->end < extent_end) {
401 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
402 				ret = -EOPNOTSUPP;
403 				break;
404 			}
405 
406 			memcpy(&new_key, &key, sizeof(new_key));
407 			new_key.offset = args->end;
408 			btrfs_set_item_key_safe(trans, path, &new_key);
409 
410 			extent_offset += args->end - key.offset;
411 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
412 			btrfs_set_file_extent_num_bytes(leaf, fi,
413 							extent_end - args->end);
414 			btrfs_mark_buffer_dirty(trans, leaf);
415 			if (update_refs && disk_bytenr > 0)
416 				args->bytes_found += args->end - key.offset;
417 			break;
418 		}
419 
420 		search_start = extent_end;
421 		/*
422 		 *       | ---- range to drop ----- |
423 		 *  | -------- extent -------- |
424 		 */
425 		if (args->start > key.offset && args->end >= extent_end) {
426 			BUG_ON(del_nr > 0);
427 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
428 				ret = -EOPNOTSUPP;
429 				break;
430 			}
431 
432 			btrfs_set_file_extent_num_bytes(leaf, fi,
433 							args->start - key.offset);
434 			btrfs_mark_buffer_dirty(trans, leaf);
435 			if (update_refs && disk_bytenr > 0)
436 				args->bytes_found += extent_end - args->start;
437 			if (args->end == extent_end)
438 				break;
439 
440 			path->slots[0]++;
441 			goto next_slot;
442 		}
443 
444 		/*
445 		 *  | ---- range to drop ----- |
446 		 *    | ------ extent ------ |
447 		 */
448 		if (args->start <= key.offset && args->end >= extent_end) {
449 delete_extent_item:
450 			if (del_nr == 0) {
451 				del_slot = path->slots[0];
452 				del_nr = 1;
453 			} else {
454 				BUG_ON(del_slot + del_nr != path->slots[0]);
455 				del_nr++;
456 			}
457 
458 			if (update_refs &&
459 			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
460 				args->bytes_found += extent_end - key.offset;
461 				extent_end = ALIGN(extent_end,
462 						   fs_info->sectorsize);
463 			} else if (update_refs && disk_bytenr > 0) {
464 				btrfs_init_generic_ref(&ref,
465 						BTRFS_DROP_DELAYED_REF,
466 						disk_bytenr, num_bytes, 0);
467 				btrfs_init_data_ref(&ref,
468 						root->root_key.objectid,
469 						key.objectid,
470 						key.offset - extent_offset, 0,
471 						false);
472 				ret = btrfs_free_extent(trans, &ref);
473 				if (ret) {
474 					btrfs_abort_transaction(trans, ret);
475 					break;
476 				}
477 				args->bytes_found += extent_end - key.offset;
478 			}
479 
480 			if (args->end == extent_end)
481 				break;
482 
483 			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
484 				path->slots[0]++;
485 				goto next_slot;
486 			}
487 
488 			ret = btrfs_del_items(trans, root, path, del_slot,
489 					      del_nr);
490 			if (ret) {
491 				btrfs_abort_transaction(trans, ret);
492 				break;
493 			}
494 
495 			del_nr = 0;
496 			del_slot = 0;
497 
498 			btrfs_release_path(path);
499 			continue;
500 		}
501 
502 		BUG();
503 	}
504 
505 	if (!ret && del_nr > 0) {
506 		/*
507 		 * Set path->slots[0] to first slot, so that after the delete
508 		 * if items are move off from our leaf to its immediate left or
509 		 * right neighbor leafs, we end up with a correct and adjusted
510 		 * path->slots[0] for our insertion (if args->replace_extent).
511 		 */
512 		path->slots[0] = del_slot;
513 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
514 		if (ret)
515 			btrfs_abort_transaction(trans, ret);
516 	}
517 
518 	leaf = path->nodes[0];
519 	/*
520 	 * If btrfs_del_items() was called, it might have deleted a leaf, in
521 	 * which case it unlocked our path, so check path->locks[0] matches a
522 	 * write lock.
523 	 */
524 	if (!ret && args->replace_extent &&
525 	    path->locks[0] == BTRFS_WRITE_LOCK &&
526 	    btrfs_leaf_free_space(leaf) >=
527 	    sizeof(struct btrfs_item) + args->extent_item_size) {
528 
529 		key.objectid = ino;
530 		key.type = BTRFS_EXTENT_DATA_KEY;
531 		key.offset = args->start;
532 		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
533 			struct btrfs_key slot_key;
534 
535 			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
536 			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
537 				path->slots[0]++;
538 		}
539 		btrfs_setup_item_for_insert(trans, root, path, &key,
540 					    args->extent_item_size);
541 		args->extent_inserted = true;
542 	}
543 
544 	if (!args->path)
545 		btrfs_free_path(path);
546 	else if (!args->extent_inserted)
547 		btrfs_release_path(path);
548 out:
549 	args->drop_end = found ? min(args->end, last_end) : args->end;
550 
551 	return ret;
552 }
553 
554 static int extent_mergeable(struct extent_buffer *leaf, int slot,
555 			    u64 objectid, u64 bytenr, u64 orig_offset,
556 			    u64 *start, u64 *end)
557 {
558 	struct btrfs_file_extent_item *fi;
559 	struct btrfs_key key;
560 	u64 extent_end;
561 
562 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
563 		return 0;
564 
565 	btrfs_item_key_to_cpu(leaf, &key, slot);
566 	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
567 		return 0;
568 
569 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
570 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
571 	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
572 	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
573 	    btrfs_file_extent_compression(leaf, fi) ||
574 	    btrfs_file_extent_encryption(leaf, fi) ||
575 	    btrfs_file_extent_other_encoding(leaf, fi))
576 		return 0;
577 
578 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
579 	if ((*start && *start != key.offset) || (*end && *end != extent_end))
580 		return 0;
581 
582 	*start = key.offset;
583 	*end = extent_end;
584 	return 1;
585 }
586 
587 /*
588  * Mark extent in the range start - end as written.
589  *
590  * This changes extent type from 'pre-allocated' to 'regular'. If only
591  * part of extent is marked as written, the extent will be split into
592  * two or three.
593  */
594 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
595 			      struct btrfs_inode *inode, u64 start, u64 end)
596 {
597 	struct btrfs_root *root = inode->root;
598 	struct extent_buffer *leaf;
599 	struct btrfs_path *path;
600 	struct btrfs_file_extent_item *fi;
601 	struct btrfs_ref ref = { 0 };
602 	struct btrfs_key key;
603 	struct btrfs_key new_key;
604 	u64 bytenr;
605 	u64 num_bytes;
606 	u64 extent_end;
607 	u64 orig_offset;
608 	u64 other_start;
609 	u64 other_end;
610 	u64 split;
611 	int del_nr = 0;
612 	int del_slot = 0;
613 	int recow;
614 	int ret = 0;
615 	u64 ino = btrfs_ino(inode);
616 
617 	path = btrfs_alloc_path();
618 	if (!path)
619 		return -ENOMEM;
620 again:
621 	recow = 0;
622 	split = start;
623 	key.objectid = ino;
624 	key.type = BTRFS_EXTENT_DATA_KEY;
625 	key.offset = split;
626 
627 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
628 	if (ret < 0)
629 		goto out;
630 	if (ret > 0 && path->slots[0] > 0)
631 		path->slots[0]--;
632 
633 	leaf = path->nodes[0];
634 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
635 	if (key.objectid != ino ||
636 	    key.type != BTRFS_EXTENT_DATA_KEY) {
637 		ret = -EINVAL;
638 		btrfs_abort_transaction(trans, ret);
639 		goto out;
640 	}
641 	fi = btrfs_item_ptr(leaf, path->slots[0],
642 			    struct btrfs_file_extent_item);
643 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
644 		ret = -EINVAL;
645 		btrfs_abort_transaction(trans, ret);
646 		goto out;
647 	}
648 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
649 	if (key.offset > start || extent_end < end) {
650 		ret = -EINVAL;
651 		btrfs_abort_transaction(trans, ret);
652 		goto out;
653 	}
654 
655 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
656 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
657 	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
658 	memcpy(&new_key, &key, sizeof(new_key));
659 
660 	if (start == key.offset && end < extent_end) {
661 		other_start = 0;
662 		other_end = start;
663 		if (extent_mergeable(leaf, path->slots[0] - 1,
664 				     ino, bytenr, orig_offset,
665 				     &other_start, &other_end)) {
666 			new_key.offset = end;
667 			btrfs_set_item_key_safe(trans, path, &new_key);
668 			fi = btrfs_item_ptr(leaf, path->slots[0],
669 					    struct btrfs_file_extent_item);
670 			btrfs_set_file_extent_generation(leaf, fi,
671 							 trans->transid);
672 			btrfs_set_file_extent_num_bytes(leaf, fi,
673 							extent_end - end);
674 			btrfs_set_file_extent_offset(leaf, fi,
675 						     end - orig_offset);
676 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
677 					    struct btrfs_file_extent_item);
678 			btrfs_set_file_extent_generation(leaf, fi,
679 							 trans->transid);
680 			btrfs_set_file_extent_num_bytes(leaf, fi,
681 							end - other_start);
682 			btrfs_mark_buffer_dirty(trans, leaf);
683 			goto out;
684 		}
685 	}
686 
687 	if (start > key.offset && end == extent_end) {
688 		other_start = end;
689 		other_end = 0;
690 		if (extent_mergeable(leaf, path->slots[0] + 1,
691 				     ino, bytenr, orig_offset,
692 				     &other_start, &other_end)) {
693 			fi = btrfs_item_ptr(leaf, path->slots[0],
694 					    struct btrfs_file_extent_item);
695 			btrfs_set_file_extent_num_bytes(leaf, fi,
696 							start - key.offset);
697 			btrfs_set_file_extent_generation(leaf, fi,
698 							 trans->transid);
699 			path->slots[0]++;
700 			new_key.offset = start;
701 			btrfs_set_item_key_safe(trans, path, &new_key);
702 
703 			fi = btrfs_item_ptr(leaf, path->slots[0],
704 					    struct btrfs_file_extent_item);
705 			btrfs_set_file_extent_generation(leaf, fi,
706 							 trans->transid);
707 			btrfs_set_file_extent_num_bytes(leaf, fi,
708 							other_end - start);
709 			btrfs_set_file_extent_offset(leaf, fi,
710 						     start - orig_offset);
711 			btrfs_mark_buffer_dirty(trans, leaf);
712 			goto out;
713 		}
714 	}
715 
716 	while (start > key.offset || end < extent_end) {
717 		if (key.offset == start)
718 			split = end;
719 
720 		new_key.offset = split;
721 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
722 		if (ret == -EAGAIN) {
723 			btrfs_release_path(path);
724 			goto again;
725 		}
726 		if (ret < 0) {
727 			btrfs_abort_transaction(trans, ret);
728 			goto out;
729 		}
730 
731 		leaf = path->nodes[0];
732 		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
733 				    struct btrfs_file_extent_item);
734 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
735 		btrfs_set_file_extent_num_bytes(leaf, fi,
736 						split - key.offset);
737 
738 		fi = btrfs_item_ptr(leaf, path->slots[0],
739 				    struct btrfs_file_extent_item);
740 
741 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
742 		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
743 		btrfs_set_file_extent_num_bytes(leaf, fi,
744 						extent_end - split);
745 		btrfs_mark_buffer_dirty(trans, leaf);
746 
747 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
748 				       num_bytes, 0);
749 		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
750 				    orig_offset, 0, false);
751 		ret = btrfs_inc_extent_ref(trans, &ref);
752 		if (ret) {
753 			btrfs_abort_transaction(trans, ret);
754 			goto out;
755 		}
756 
757 		if (split == start) {
758 			key.offset = start;
759 		} else {
760 			if (start != key.offset) {
761 				ret = -EINVAL;
762 				btrfs_abort_transaction(trans, ret);
763 				goto out;
764 			}
765 			path->slots[0]--;
766 			extent_end = end;
767 		}
768 		recow = 1;
769 	}
770 
771 	other_start = end;
772 	other_end = 0;
773 	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
774 			       num_bytes, 0);
775 	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
776 			    0, false);
777 	if (extent_mergeable(leaf, path->slots[0] + 1,
778 			     ino, bytenr, orig_offset,
779 			     &other_start, &other_end)) {
780 		if (recow) {
781 			btrfs_release_path(path);
782 			goto again;
783 		}
784 		extent_end = other_end;
785 		del_slot = path->slots[0] + 1;
786 		del_nr++;
787 		ret = btrfs_free_extent(trans, &ref);
788 		if (ret) {
789 			btrfs_abort_transaction(trans, ret);
790 			goto out;
791 		}
792 	}
793 	other_start = 0;
794 	other_end = start;
795 	if (extent_mergeable(leaf, path->slots[0] - 1,
796 			     ino, bytenr, orig_offset,
797 			     &other_start, &other_end)) {
798 		if (recow) {
799 			btrfs_release_path(path);
800 			goto again;
801 		}
802 		key.offset = other_start;
803 		del_slot = path->slots[0];
804 		del_nr++;
805 		ret = btrfs_free_extent(trans, &ref);
806 		if (ret) {
807 			btrfs_abort_transaction(trans, ret);
808 			goto out;
809 		}
810 	}
811 	if (del_nr == 0) {
812 		fi = btrfs_item_ptr(leaf, path->slots[0],
813 			   struct btrfs_file_extent_item);
814 		btrfs_set_file_extent_type(leaf, fi,
815 					   BTRFS_FILE_EXTENT_REG);
816 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
817 		btrfs_mark_buffer_dirty(trans, leaf);
818 	} else {
819 		fi = btrfs_item_ptr(leaf, del_slot - 1,
820 			   struct btrfs_file_extent_item);
821 		btrfs_set_file_extent_type(leaf, fi,
822 					   BTRFS_FILE_EXTENT_REG);
823 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
824 		btrfs_set_file_extent_num_bytes(leaf, fi,
825 						extent_end - key.offset);
826 		btrfs_mark_buffer_dirty(trans, leaf);
827 
828 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
829 		if (ret < 0) {
830 			btrfs_abort_transaction(trans, ret);
831 			goto out;
832 		}
833 	}
834 out:
835 	btrfs_free_path(path);
836 	return ret;
837 }
838 
839 /*
840  * on error we return an unlocked page and the error value
841  * on success we return a locked page and 0
842  */
843 static int prepare_uptodate_page(struct inode *inode,
844 				 struct page *page, u64 pos,
845 				 bool force_uptodate)
846 {
847 	struct folio *folio = page_folio(page);
848 	int ret = 0;
849 
850 	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
851 	    !PageUptodate(page)) {
852 		ret = btrfs_read_folio(NULL, folio);
853 		if (ret)
854 			return ret;
855 		lock_page(page);
856 		if (!PageUptodate(page)) {
857 			unlock_page(page);
858 			return -EIO;
859 		}
860 
861 		/*
862 		 * Since btrfs_read_folio() will unlock the folio before it
863 		 * returns, there is a window where btrfs_release_folio() can be
864 		 * called to release the page.  Here we check both inode
865 		 * mapping and PagePrivate() to make sure the page was not
866 		 * released.
867 		 *
868 		 * The private flag check is essential for subpage as we need
869 		 * to store extra bitmap using page->private.
870 		 */
871 		if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
872 			unlock_page(page);
873 			return -EAGAIN;
874 		}
875 	}
876 	return 0;
877 }
878 
879 static fgf_t get_prepare_fgp_flags(bool nowait)
880 {
881 	fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
882 
883 	if (nowait)
884 		fgp_flags |= FGP_NOWAIT;
885 
886 	return fgp_flags;
887 }
888 
889 static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
890 {
891 	gfp_t gfp;
892 
893 	gfp = btrfs_alloc_write_mask(inode->i_mapping);
894 	if (nowait) {
895 		gfp &= ~__GFP_DIRECT_RECLAIM;
896 		gfp |= GFP_NOWAIT;
897 	}
898 
899 	return gfp;
900 }
901 
902 /*
903  * this just gets pages into the page cache and locks them down.
904  */
905 static noinline int prepare_pages(struct inode *inode, struct page **pages,
906 				  size_t num_pages, loff_t pos,
907 				  size_t write_bytes, bool force_uptodate,
908 				  bool nowait)
909 {
910 	int i;
911 	unsigned long index = pos >> PAGE_SHIFT;
912 	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
913 	fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
914 	int err = 0;
915 	int faili;
916 
917 	for (i = 0; i < num_pages; i++) {
918 again:
919 		pages[i] = pagecache_get_page(inode->i_mapping, index + i,
920 					      fgp_flags, mask | __GFP_WRITE);
921 		if (!pages[i]) {
922 			faili = i - 1;
923 			if (nowait)
924 				err = -EAGAIN;
925 			else
926 				err = -ENOMEM;
927 			goto fail;
928 		}
929 
930 		err = set_page_extent_mapped(pages[i]);
931 		if (err < 0) {
932 			faili = i;
933 			goto fail;
934 		}
935 
936 		if (i == 0)
937 			err = prepare_uptodate_page(inode, pages[i], pos,
938 						    force_uptodate);
939 		if (!err && i == num_pages - 1)
940 			err = prepare_uptodate_page(inode, pages[i],
941 						    pos + write_bytes, false);
942 		if (err) {
943 			put_page(pages[i]);
944 			if (!nowait && err == -EAGAIN) {
945 				err = 0;
946 				goto again;
947 			}
948 			faili = i - 1;
949 			goto fail;
950 		}
951 		wait_on_page_writeback(pages[i]);
952 	}
953 
954 	return 0;
955 fail:
956 	while (faili >= 0) {
957 		unlock_page(pages[faili]);
958 		put_page(pages[faili]);
959 		faili--;
960 	}
961 	return err;
962 
963 }
964 
965 /*
966  * This function locks the extent and properly waits for data=ordered extents
967  * to finish before allowing the pages to be modified if need.
968  *
969  * The return value:
970  * 1 - the extent is locked
971  * 0 - the extent is not locked, and everything is OK
972  * -EAGAIN - need re-prepare the pages
973  * the other < 0 number - Something wrong happens
974  */
975 static noinline int
976 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
977 				size_t num_pages, loff_t pos,
978 				size_t write_bytes,
979 				u64 *lockstart, u64 *lockend, bool nowait,
980 				struct extent_state **cached_state)
981 {
982 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
983 	u64 start_pos;
984 	u64 last_pos;
985 	int i;
986 	int ret = 0;
987 
988 	start_pos = round_down(pos, fs_info->sectorsize);
989 	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
990 
991 	if (start_pos < inode->vfs_inode.i_size) {
992 		struct btrfs_ordered_extent *ordered;
993 
994 		if (nowait) {
995 			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
996 					     cached_state)) {
997 				for (i = 0; i < num_pages; i++) {
998 					unlock_page(pages[i]);
999 					put_page(pages[i]);
1000 					pages[i] = NULL;
1001 				}
1002 
1003 				return -EAGAIN;
1004 			}
1005 		} else {
1006 			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
1007 		}
1008 
1009 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
1010 						     last_pos - start_pos + 1);
1011 		if (ordered &&
1012 		    ordered->file_offset + ordered->num_bytes > start_pos &&
1013 		    ordered->file_offset <= last_pos) {
1014 			unlock_extent(&inode->io_tree, start_pos, last_pos,
1015 				      cached_state);
1016 			for (i = 0; i < num_pages; i++) {
1017 				unlock_page(pages[i]);
1018 				put_page(pages[i]);
1019 			}
1020 			btrfs_start_ordered_extent(ordered);
1021 			btrfs_put_ordered_extent(ordered);
1022 			return -EAGAIN;
1023 		}
1024 		if (ordered)
1025 			btrfs_put_ordered_extent(ordered);
1026 
1027 		*lockstart = start_pos;
1028 		*lockend = last_pos;
1029 		ret = 1;
1030 	}
1031 
1032 	/*
1033 	 * We should be called after prepare_pages() which should have locked
1034 	 * all pages in the range.
1035 	 */
1036 	for (i = 0; i < num_pages; i++)
1037 		WARN_ON(!PageLocked(pages[i]));
1038 
1039 	return ret;
1040 }
1041 
1042 /*
1043  * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1044  *
1045  * @pos:         File offset.
1046  * @write_bytes: The length to write, will be updated to the nocow writeable
1047  *               range.
1048  *
1049  * This function will flush ordered extents in the range to ensure proper
1050  * nocow checks.
1051  *
1052  * Return:
1053  * > 0          If we can nocow, and updates @write_bytes.
1054  *  0           If we can't do a nocow write.
1055  * -EAGAIN      If we can't do a nocow write because snapshoting of the inode's
1056  *              root is in progress.
1057  * < 0          If an error happened.
1058  *
1059  * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
1060  */
1061 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1062 			   size_t *write_bytes, bool nowait)
1063 {
1064 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1065 	struct btrfs_root *root = inode->root;
1066 	struct extent_state *cached_state = NULL;
1067 	u64 lockstart, lockend;
1068 	u64 num_bytes;
1069 	int ret;
1070 
1071 	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1072 		return 0;
1073 
1074 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
1075 		return -EAGAIN;
1076 
1077 	lockstart = round_down(pos, fs_info->sectorsize);
1078 	lockend = round_up(pos + *write_bytes,
1079 			   fs_info->sectorsize) - 1;
1080 	num_bytes = lockend - lockstart + 1;
1081 
1082 	if (nowait) {
1083 		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
1084 						  &cached_state)) {
1085 			btrfs_drew_write_unlock(&root->snapshot_lock);
1086 			return -EAGAIN;
1087 		}
1088 	} else {
1089 		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
1090 						   &cached_state);
1091 	}
1092 	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1093 			NULL, NULL, NULL, nowait, false);
1094 	if (ret <= 0)
1095 		btrfs_drew_write_unlock(&root->snapshot_lock);
1096 	else
1097 		*write_bytes = min_t(size_t, *write_bytes ,
1098 				     num_bytes - pos + lockstart);
1099 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
1100 
1101 	return ret;
1102 }
1103 
1104 void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1105 {
1106 	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1107 }
1108 
1109 static void update_time_for_write(struct inode *inode)
1110 {
1111 	struct timespec64 now, ctime;
1112 
1113 	if (IS_NOCMTIME(inode))
1114 		return;
1115 
1116 	now = current_time(inode);
1117 	if (!timespec64_equal(&inode->i_mtime, &now))
1118 		inode->i_mtime = now;
1119 
1120 	ctime = inode_get_ctime(inode);
1121 	if (!timespec64_equal(&ctime, &now))
1122 		inode_set_ctime_to_ts(inode, now);
1123 
1124 	if (IS_I_VERSION(inode))
1125 		inode_inc_iversion(inode);
1126 }
1127 
1128 static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
1129 			     size_t count)
1130 {
1131 	struct file *file = iocb->ki_filp;
1132 	struct inode *inode = file_inode(file);
1133 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1134 	loff_t pos = iocb->ki_pos;
1135 	int ret;
1136 	loff_t oldsize;
1137 	loff_t start_pos;
1138 
1139 	/*
1140 	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
1141 	 * prealloc flags, as without those flags we always have to COW. We will
1142 	 * later check if we can really COW into the target range (using
1143 	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
1144 	 */
1145 	if ((iocb->ki_flags & IOCB_NOWAIT) &&
1146 	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1147 		return -EAGAIN;
1148 
1149 	ret = file_remove_privs(file);
1150 	if (ret)
1151 		return ret;
1152 
1153 	/*
1154 	 * We reserve space for updating the inode when we reserve space for the
1155 	 * extent we are going to write, so we will enospc out there.  We don't
1156 	 * need to start yet another transaction to update the inode as we will
1157 	 * update the inode when we finish writing whatever data we write.
1158 	 */
1159 	update_time_for_write(inode);
1160 
1161 	start_pos = round_down(pos, fs_info->sectorsize);
1162 	oldsize = i_size_read(inode);
1163 	if (start_pos > oldsize) {
1164 		/* Expand hole size to cover write data, preventing empty gap */
1165 		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1166 
1167 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1168 		if (ret)
1169 			return ret;
1170 	}
1171 
1172 	return 0;
1173 }
1174 
1175 static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1176 					       struct iov_iter *i)
1177 {
1178 	struct file *file = iocb->ki_filp;
1179 	loff_t pos;
1180 	struct inode *inode = file_inode(file);
1181 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1182 	struct page **pages = NULL;
1183 	struct extent_changeset *data_reserved = NULL;
1184 	u64 release_bytes = 0;
1185 	u64 lockstart;
1186 	u64 lockend;
1187 	size_t num_written = 0;
1188 	int nrptrs;
1189 	ssize_t ret;
1190 	bool only_release_metadata = false;
1191 	bool force_page_uptodate = false;
1192 	loff_t old_isize = i_size_read(inode);
1193 	unsigned int ilock_flags = 0;
1194 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
1195 	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
1196 
1197 	if (nowait)
1198 		ilock_flags |= BTRFS_ILOCK_TRY;
1199 
1200 	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
1201 	if (ret < 0)
1202 		return ret;
1203 
1204 	ret = generic_write_checks(iocb, i);
1205 	if (ret <= 0)
1206 		goto out;
1207 
1208 	ret = btrfs_write_check(iocb, i, ret);
1209 	if (ret < 0)
1210 		goto out;
1211 
1212 	pos = iocb->ki_pos;
1213 	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1214 			PAGE_SIZE / (sizeof(struct page *)));
1215 	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1216 	nrptrs = max(nrptrs, 8);
1217 	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1218 	if (!pages) {
1219 		ret = -ENOMEM;
1220 		goto out;
1221 	}
1222 
1223 	while (iov_iter_count(i) > 0) {
1224 		struct extent_state *cached_state = NULL;
1225 		size_t offset = offset_in_page(pos);
1226 		size_t sector_offset;
1227 		size_t write_bytes = min(iov_iter_count(i),
1228 					 nrptrs * (size_t)PAGE_SIZE -
1229 					 offset);
1230 		size_t num_pages;
1231 		size_t reserve_bytes;
1232 		size_t dirty_pages;
1233 		size_t copied;
1234 		size_t dirty_sectors;
1235 		size_t num_sectors;
1236 		int extents_locked;
1237 
1238 		/*
1239 		 * Fault pages before locking them in prepare_pages
1240 		 * to avoid recursive lock
1241 		 */
1242 		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
1243 			ret = -EFAULT;
1244 			break;
1245 		}
1246 
1247 		only_release_metadata = false;
1248 		sector_offset = pos & (fs_info->sectorsize - 1);
1249 
1250 		extent_changeset_release(data_reserved);
1251 		ret = btrfs_check_data_free_space(BTRFS_I(inode),
1252 						  &data_reserved, pos,
1253 						  write_bytes, nowait);
1254 		if (ret < 0) {
1255 			int can_nocow;
1256 
1257 			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
1258 				ret = -EAGAIN;
1259 				break;
1260 			}
1261 
1262 			/*
1263 			 * If we don't have to COW at the offset, reserve
1264 			 * metadata only. write_bytes may get smaller than
1265 			 * requested here.
1266 			 */
1267 			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1268 							   &write_bytes, nowait);
1269 			if (can_nocow < 0)
1270 				ret = can_nocow;
1271 			if (can_nocow > 0)
1272 				ret = 0;
1273 			if (ret)
1274 				break;
1275 			only_release_metadata = true;
1276 		}
1277 
1278 		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
1279 		WARN_ON(num_pages > nrptrs);
1280 		reserve_bytes = round_up(write_bytes + sector_offset,
1281 					 fs_info->sectorsize);
1282 		WARN_ON(reserve_bytes == 0);
1283 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1284 						      reserve_bytes,
1285 						      reserve_bytes, nowait);
1286 		if (ret) {
1287 			if (!only_release_metadata)
1288 				btrfs_free_reserved_data_space(BTRFS_I(inode),
1289 						data_reserved, pos,
1290 						write_bytes);
1291 			else
1292 				btrfs_check_nocow_unlock(BTRFS_I(inode));
1293 
1294 			if (nowait && ret == -ENOSPC)
1295 				ret = -EAGAIN;
1296 			break;
1297 		}
1298 
1299 		release_bytes = reserve_bytes;
1300 again:
1301 		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
1302 		if (ret) {
1303 			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1304 			break;
1305 		}
1306 
1307 		/*
1308 		 * This is going to setup the pages array with the number of
1309 		 * pages we want, so we don't really need to worry about the
1310 		 * contents of pages from loop to loop
1311 		 */
1312 		ret = prepare_pages(inode, pages, num_pages,
1313 				    pos, write_bytes, force_page_uptodate, false);
1314 		if (ret) {
1315 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1316 						       reserve_bytes);
1317 			break;
1318 		}
1319 
1320 		extents_locked = lock_and_cleanup_extent_if_need(
1321 				BTRFS_I(inode), pages,
1322 				num_pages, pos, write_bytes, &lockstart,
1323 				&lockend, nowait, &cached_state);
1324 		if (extents_locked < 0) {
1325 			if (!nowait && extents_locked == -EAGAIN)
1326 				goto again;
1327 
1328 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1329 						       reserve_bytes);
1330 			ret = extents_locked;
1331 			break;
1332 		}
1333 
1334 		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1335 
1336 		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1337 		dirty_sectors = round_up(copied + sector_offset,
1338 					fs_info->sectorsize);
1339 		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1340 
1341 		/*
1342 		 * if we have trouble faulting in the pages, fall
1343 		 * back to one page at a time
1344 		 */
1345 		if (copied < write_bytes)
1346 			nrptrs = 1;
1347 
1348 		if (copied == 0) {
1349 			force_page_uptodate = true;
1350 			dirty_sectors = 0;
1351 			dirty_pages = 0;
1352 		} else {
1353 			force_page_uptodate = false;
1354 			dirty_pages = DIV_ROUND_UP(copied + offset,
1355 						   PAGE_SIZE);
1356 		}
1357 
1358 		if (num_sectors > dirty_sectors) {
1359 			/* release everything except the sectors we dirtied */
1360 			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1361 			if (only_release_metadata) {
1362 				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1363 							release_bytes, true);
1364 			} else {
1365 				u64 __pos;
1366 
1367 				__pos = round_down(pos,
1368 						   fs_info->sectorsize) +
1369 					(dirty_pages << PAGE_SHIFT);
1370 				btrfs_delalloc_release_space(BTRFS_I(inode),
1371 						data_reserved, __pos,
1372 						release_bytes, true);
1373 			}
1374 		}
1375 
1376 		release_bytes = round_up(copied + sector_offset,
1377 					fs_info->sectorsize);
1378 
1379 		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1380 					dirty_pages, pos, copied,
1381 					&cached_state, only_release_metadata);
1382 
1383 		/*
1384 		 * If we have not locked the extent range, because the range's
1385 		 * start offset is >= i_size, we might still have a non-NULL
1386 		 * cached extent state, acquired while marking the extent range
1387 		 * as delalloc through btrfs_dirty_pages(). Therefore free any
1388 		 * possible cached extent state to avoid a memory leak.
1389 		 */
1390 		if (extents_locked)
1391 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
1392 				      lockend, &cached_state);
1393 		else
1394 			free_extent_state(cached_state);
1395 
1396 		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1397 		if (ret) {
1398 			btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1399 			break;
1400 		}
1401 
1402 		release_bytes = 0;
1403 		if (only_release_metadata)
1404 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1405 
1406 		btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1407 
1408 		cond_resched();
1409 
1410 		pos += copied;
1411 		num_written += copied;
1412 	}
1413 
1414 	kfree(pages);
1415 
1416 	if (release_bytes) {
1417 		if (only_release_metadata) {
1418 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1419 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1420 					release_bytes, true);
1421 		} else {
1422 			btrfs_delalloc_release_space(BTRFS_I(inode),
1423 					data_reserved,
1424 					round_down(pos, fs_info->sectorsize),
1425 					release_bytes, true);
1426 		}
1427 	}
1428 
1429 	extent_changeset_free(data_reserved);
1430 	if (num_written > 0) {
1431 		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1432 		iocb->ki_pos += num_written;
1433 	}
1434 out:
1435 	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1436 	return num_written ? num_written : ret;
1437 }
1438 
1439 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
1440 			       const struct iov_iter *iter, loff_t offset)
1441 {
1442 	const u32 blocksize_mask = fs_info->sectorsize - 1;
1443 
1444 	if (offset & blocksize_mask)
1445 		return -EINVAL;
1446 
1447 	if (iov_iter_alignment(iter) & blocksize_mask)
1448 		return -EINVAL;
1449 
1450 	return 0;
1451 }
1452 
1453 static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1454 {
1455 	struct file *file = iocb->ki_filp;
1456 	struct inode *inode = file_inode(file);
1457 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1458 	loff_t pos;
1459 	ssize_t written = 0;
1460 	ssize_t written_buffered;
1461 	size_t prev_left = 0;
1462 	loff_t endbyte;
1463 	ssize_t err;
1464 	unsigned int ilock_flags = 0;
1465 	struct iomap_dio *dio;
1466 
1467 	if (iocb->ki_flags & IOCB_NOWAIT)
1468 		ilock_flags |= BTRFS_ILOCK_TRY;
1469 
1470 	/*
1471 	 * If the write DIO is within EOF, use a shared lock and also only if
1472 	 * security bits will likely not be dropped by file_remove_privs() called
1473 	 * from btrfs_write_check(). Either will need to be rechecked after the
1474 	 * lock was acquired.
1475 	 */
1476 	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
1477 		ilock_flags |= BTRFS_ILOCK_SHARED;
1478 
1479 relock:
1480 	err = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
1481 	if (err < 0)
1482 		return err;
1483 
1484 	/* Shared lock cannot be used with security bits set. */
1485 	if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
1486 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1487 		ilock_flags &= ~BTRFS_ILOCK_SHARED;
1488 		goto relock;
1489 	}
1490 
1491 	err = generic_write_checks(iocb, from);
1492 	if (err <= 0) {
1493 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1494 		return err;
1495 	}
1496 
1497 	err = btrfs_write_check(iocb, from, err);
1498 	if (err < 0) {
1499 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1500 		goto out;
1501 	}
1502 
1503 	pos = iocb->ki_pos;
1504 	/*
1505 	 * Re-check since file size may have changed just before taking the
1506 	 * lock or pos may have changed because of O_APPEND in generic_write_check()
1507 	 */
1508 	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
1509 	    pos + iov_iter_count(from) > i_size_read(inode)) {
1510 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1511 		ilock_flags &= ~BTRFS_ILOCK_SHARED;
1512 		goto relock;
1513 	}
1514 
1515 	if (check_direct_IO(fs_info, from, pos)) {
1516 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1517 		goto buffered;
1518 	}
1519 
1520 	/*
1521 	 * The iov_iter can be mapped to the same file range we are writing to.
1522 	 * If that's the case, then we will deadlock in the iomap code, because
1523 	 * it first calls our callback btrfs_dio_iomap_begin(), which will create
1524 	 * an ordered extent, and after that it will fault in the pages that the
1525 	 * iov_iter refers to. During the fault in we end up in the readahead
1526 	 * pages code (starting at btrfs_readahead()), which will lock the range,
1527 	 * find that ordered extent and then wait for it to complete (at
1528 	 * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
1529 	 * obviously the ordered extent can never complete as we didn't submit
1530 	 * yet the respective bio(s). This always happens when the buffer is
1531 	 * memory mapped to the same file range, since the iomap DIO code always
1532 	 * invalidates pages in the target file range (after starting and waiting
1533 	 * for any writeback).
1534 	 *
1535 	 * So here we disable page faults in the iov_iter and then retry if we
1536 	 * got -EFAULT, faulting in the pages before the retry.
1537 	 */
1538 again:
1539 	from->nofault = true;
1540 	dio = btrfs_dio_write(iocb, from, written);
1541 	from->nofault = false;
1542 
1543 	if (IS_ERR_OR_NULL(dio)) {
1544 		err = PTR_ERR_OR_ZERO(dio);
1545 	} else {
1546 		struct btrfs_file_private stack_private = { 0 };
1547 		struct btrfs_file_private *private;
1548 		const bool have_private = (file->private_data != NULL);
1549 
1550 		if (!have_private)
1551 			file->private_data = &stack_private;
1552 
1553 		/*
1554 		 * If we have a synchoronous write, we must make sure the fsync
1555 		 * triggered by the iomap_dio_complete() call below doesn't
1556 		 * deadlock on the inode lock - we are already holding it and we
1557 		 * can't call it after unlocking because we may need to complete
1558 		 * partial writes due to the input buffer (or parts of it) not
1559 		 * being already faulted in.
1560 		 */
1561 		private = file->private_data;
1562 		private->fsync_skip_inode_lock = true;
1563 		err = iomap_dio_complete(dio);
1564 		private->fsync_skip_inode_lock = false;
1565 
1566 		if (!have_private)
1567 			file->private_data = NULL;
1568 	}
1569 
1570 	/* No increment (+=) because iomap returns a cumulative value. */
1571 	if (err > 0)
1572 		written = err;
1573 
1574 	if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
1575 		const size_t left = iov_iter_count(from);
1576 		/*
1577 		 * We have more data left to write. Try to fault in as many as
1578 		 * possible of the remainder pages and retry. We do this without
1579 		 * releasing and locking again the inode, to prevent races with
1580 		 * truncate.
1581 		 *
1582 		 * Also, in case the iov refers to pages in the file range of the
1583 		 * file we want to write to (due to a mmap), we could enter an
1584 		 * infinite loop if we retry after faulting the pages in, since
1585 		 * iomap will invalidate any pages in the range early on, before
1586 		 * it tries to fault in the pages of the iov. So we keep track of
1587 		 * how much was left of iov in the previous EFAULT and fallback
1588 		 * to buffered IO in case we haven't made any progress.
1589 		 */
1590 		if (left == prev_left) {
1591 			err = -ENOTBLK;
1592 		} else {
1593 			fault_in_iov_iter_readable(from, left);
1594 			prev_left = left;
1595 			goto again;
1596 		}
1597 	}
1598 
1599 	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1600 
1601 	/*
1602 	 * If 'err' is -ENOTBLK or we have not written all data, then it means
1603 	 * we must fallback to buffered IO.
1604 	 */
1605 	if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
1606 		goto out;
1607 
1608 buffered:
1609 	/*
1610 	 * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
1611 	 * it must retry the operation in a context where blocking is acceptable,
1612 	 * because even if we end up not blocking during the buffered IO attempt
1613 	 * below, we will block when flushing and waiting for the IO.
1614 	 */
1615 	if (iocb->ki_flags & IOCB_NOWAIT) {
1616 		err = -EAGAIN;
1617 		goto out;
1618 	}
1619 
1620 	pos = iocb->ki_pos;
1621 	written_buffered = btrfs_buffered_write(iocb, from);
1622 	if (written_buffered < 0) {
1623 		err = written_buffered;
1624 		goto out;
1625 	}
1626 	/*
1627 	 * Ensure all data is persisted. We want the next direct IO read to be
1628 	 * able to read what was just written.
1629 	 */
1630 	endbyte = pos + written_buffered - 1;
1631 	err = btrfs_fdatawrite_range(inode, pos, endbyte);
1632 	if (err)
1633 		goto out;
1634 	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1635 	if (err)
1636 		goto out;
1637 	written += written_buffered;
1638 	iocb->ki_pos = pos + written_buffered;
1639 	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1640 				 endbyte >> PAGE_SHIFT);
1641 out:
1642 	return err < 0 ? err : written;
1643 }
1644 
1645 static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
1646 			const struct btrfs_ioctl_encoded_io_args *encoded)
1647 {
1648 	struct file *file = iocb->ki_filp;
1649 	struct inode *inode = file_inode(file);
1650 	loff_t count;
1651 	ssize_t ret;
1652 
1653 	btrfs_inode_lock(BTRFS_I(inode), 0);
1654 	count = encoded->len;
1655 	ret = generic_write_checks_count(iocb, &count);
1656 	if (ret == 0 && count != encoded->len) {
1657 		/*
1658 		 * The write got truncated by generic_write_checks_count(). We
1659 		 * can't do a partial encoded write.
1660 		 */
1661 		ret = -EFBIG;
1662 	}
1663 	if (ret || encoded->len == 0)
1664 		goto out;
1665 
1666 	ret = btrfs_write_check(iocb, from, encoded->len);
1667 	if (ret < 0)
1668 		goto out;
1669 
1670 	ret = btrfs_do_encoded_write(iocb, from, encoded);
1671 out:
1672 	btrfs_inode_unlock(BTRFS_I(inode), 0);
1673 	return ret;
1674 }
1675 
1676 ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
1677 			    const struct btrfs_ioctl_encoded_io_args *encoded)
1678 {
1679 	struct file *file = iocb->ki_filp;
1680 	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
1681 	ssize_t num_written, num_sync;
1682 
1683 	/*
1684 	 * If the fs flips readonly due to some impossible error, although we
1685 	 * have opened a file as writable, we have to stop this write operation
1686 	 * to ensure consistency.
1687 	 */
1688 	if (BTRFS_FS_ERROR(inode->root->fs_info))
1689 		return -EROFS;
1690 
1691 	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
1692 		return -EOPNOTSUPP;
1693 
1694 	if (encoded) {
1695 		num_written = btrfs_encoded_write(iocb, from, encoded);
1696 		num_sync = encoded->len;
1697 	} else if (iocb->ki_flags & IOCB_DIRECT) {
1698 		num_written = btrfs_direct_write(iocb, from);
1699 		num_sync = num_written;
1700 	} else {
1701 		num_written = btrfs_buffered_write(iocb, from);
1702 		num_sync = num_written;
1703 	}
1704 
1705 	btrfs_set_inode_last_sub_trans(inode);
1706 
1707 	if (num_sync > 0) {
1708 		num_sync = generic_write_sync(iocb, num_sync);
1709 		if (num_sync < 0)
1710 			num_written = num_sync;
1711 	}
1712 
1713 	return num_written;
1714 }
1715 
1716 static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1717 {
1718 	return btrfs_do_write_iter(iocb, from, NULL);
1719 }
1720 
1721 int btrfs_release_file(struct inode *inode, struct file *filp)
1722 {
1723 	struct btrfs_file_private *private = filp->private_data;
1724 
1725 	if (private) {
1726 		kfree(private->filldir_buf);
1727 		free_extent_state(private->llseek_cached_state);
1728 		kfree(private);
1729 		filp->private_data = NULL;
1730 	}
1731 
1732 	/*
1733 	 * Set by setattr when we are about to truncate a file from a non-zero
1734 	 * size to a zero size.  This tries to flush down new bytes that may
1735 	 * have been written if the application were using truncate to replace
1736 	 * a file in place.
1737 	 */
1738 	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
1739 			       &BTRFS_I(inode)->runtime_flags))
1740 			filemap_flush(inode->i_mapping);
1741 	return 0;
1742 }
1743 
1744 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
1745 {
1746 	int ret;
1747 	struct blk_plug plug;
1748 
1749 	/*
1750 	 * This is only called in fsync, which would do synchronous writes, so
1751 	 * a plug can merge adjacent IOs as much as possible.  Esp. in case of
1752 	 * multiple disks using raid profile, a large IO can be split to
1753 	 * several segments of stripe length (currently 64K).
1754 	 */
1755 	blk_start_plug(&plug);
1756 	ret = btrfs_fdatawrite_range(inode, start, end);
1757 	blk_finish_plug(&plug);
1758 
1759 	return ret;
1760 }
1761 
1762 static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
1763 {
1764 	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
1765 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1766 
1767 	if (btrfs_inode_in_log(inode, fs_info->generation) &&
1768 	    list_empty(&ctx->ordered_extents))
1769 		return true;
1770 
1771 	/*
1772 	 * If we are doing a fast fsync we can not bail out if the inode's
1773 	 * last_trans is <= then the last committed transaction, because we only
1774 	 * update the last_trans of the inode during ordered extent completion,
1775 	 * and for a fast fsync we don't wait for that, we only wait for the
1776 	 * writeback to complete.
1777 	 */
1778 	if (inode->last_trans <= fs_info->last_trans_committed &&
1779 	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
1780 	     list_empty(&ctx->ordered_extents)))
1781 		return true;
1782 
1783 	return false;
1784 }
1785 
1786 /*
1787  * fsync call for both files and directories.  This logs the inode into
1788  * the tree log instead of forcing full commits whenever possible.
1789  *
1790  * It needs to call filemap_fdatawait so that all ordered extent updates are
1791  * in the metadata btree are up to date for copying to the log.
1792  *
1793  * It drops the inode mutex before doing the tree log commit.  This is an
1794  * important optimization for directories because holding the mutex prevents
1795  * new operations on the dir while we write to disk.
1796  */
1797 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1798 {
1799 	struct btrfs_file_private *private = file->private_data;
1800 	struct dentry *dentry = file_dentry(file);
1801 	struct inode *inode = d_inode(dentry);
1802 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1803 	struct btrfs_root *root = BTRFS_I(inode)->root;
1804 	struct btrfs_trans_handle *trans;
1805 	struct btrfs_log_ctx ctx;
1806 	int ret = 0, err;
1807 	u64 len;
1808 	bool full_sync;
1809 	const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false);
1810 
1811 	trace_btrfs_sync_file(file, datasync);
1812 
1813 	btrfs_init_log_ctx(&ctx, inode);
1814 
1815 	/*
1816 	 * Always set the range to a full range, otherwise we can get into
1817 	 * several problems, from missing file extent items to represent holes
1818 	 * when not using the NO_HOLES feature, to log tree corruption due to
1819 	 * races between hole detection during logging and completion of ordered
1820 	 * extents outside the range, to missing checksums due to ordered extents
1821 	 * for which we flushed only a subset of their pages.
1822 	 */
1823 	start = 0;
1824 	end = LLONG_MAX;
1825 	len = (u64)LLONG_MAX + 1;
1826 
1827 	/*
1828 	 * We write the dirty pages in the range and wait until they complete
1829 	 * out of the ->i_mutex. If so, we can flush the dirty pages by
1830 	 * multi-task, and make the performance up.  See
1831 	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1832 	 */
1833 	ret = start_ordered_ops(inode, start, end);
1834 	if (ret)
1835 		goto out;
1836 
1837 	if (skip_ilock)
1838 		down_write(&BTRFS_I(inode)->i_mmap_lock);
1839 	else
1840 		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1841 
1842 	atomic_inc(&root->log_batch);
1843 
1844 	/*
1845 	 * Before we acquired the inode's lock and the mmap lock, someone may
1846 	 * have dirtied more pages in the target range. We need to make sure
1847 	 * that writeback for any such pages does not start while we are logging
1848 	 * the inode, because if it does, any of the following might happen when
1849 	 * we are not doing a full inode sync:
1850 	 *
1851 	 * 1) We log an extent after its writeback finishes but before its
1852 	 *    checksums are added to the csum tree, leading to -EIO errors
1853 	 *    when attempting to read the extent after a log replay.
1854 	 *
1855 	 * 2) We can end up logging an extent before its writeback finishes.
1856 	 *    Therefore after the log replay we will have a file extent item
1857 	 *    pointing to an unwritten extent (and no data checksums as well).
1858 	 *
1859 	 * So trigger writeback for any eventual new dirty pages and then we
1860 	 * wait for all ordered extents to complete below.
1861 	 */
1862 	ret = start_ordered_ops(inode, start, end);
1863 	if (ret) {
1864 		if (skip_ilock)
1865 			up_write(&BTRFS_I(inode)->i_mmap_lock);
1866 		else
1867 			btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1868 		goto out;
1869 	}
1870 
1871 	/*
1872 	 * Always check for the full sync flag while holding the inode's lock,
1873 	 * to avoid races with other tasks. The flag must be either set all the
1874 	 * time during logging or always off all the time while logging.
1875 	 * We check the flag here after starting delalloc above, because when
1876 	 * running delalloc the full sync flag may be set if we need to drop
1877 	 * extra extent map ranges due to temporary memory allocation failures.
1878 	 */
1879 	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1880 			     &BTRFS_I(inode)->runtime_flags);
1881 
1882 	/*
1883 	 * We have to do this here to avoid the priority inversion of waiting on
1884 	 * IO of a lower priority task while holding a transaction open.
1885 	 *
1886 	 * For a full fsync we wait for the ordered extents to complete while
1887 	 * for a fast fsync we wait just for writeback to complete, and then
1888 	 * attach the ordered extents to the transaction so that a transaction
1889 	 * commit waits for their completion, to avoid data loss if we fsync,
1890 	 * the current transaction commits before the ordered extents complete
1891 	 * and a power failure happens right after that.
1892 	 *
1893 	 * For zoned filesystem, if a write IO uses a ZONE_APPEND command, the
1894 	 * logical address recorded in the ordered extent may change. We need
1895 	 * to wait for the IO to stabilize the logical address.
1896 	 */
1897 	if (full_sync || btrfs_is_zoned(fs_info)) {
1898 		ret = btrfs_wait_ordered_range(inode, start, len);
1899 	} else {
1900 		/*
1901 		 * Get our ordered extents as soon as possible to avoid doing
1902 		 * checksum lookups in the csum tree, and use instead the
1903 		 * checksums attached to the ordered extents.
1904 		 */
1905 		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
1906 						      &ctx.ordered_extents);
1907 		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
1908 	}
1909 
1910 	if (ret)
1911 		goto out_release_extents;
1912 
1913 	atomic_inc(&root->log_batch);
1914 
1915 	smp_mb();
1916 	if (skip_inode_logging(&ctx)) {
1917 		/*
1918 		 * We've had everything committed since the last time we were
1919 		 * modified so clear this flag in case it was set for whatever
1920 		 * reason, it's no longer relevant.
1921 		 */
1922 		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1923 			  &BTRFS_I(inode)->runtime_flags);
1924 		/*
1925 		 * An ordered extent might have started before and completed
1926 		 * already with io errors, in which case the inode was not
1927 		 * updated and we end up here. So check the inode's mapping
1928 		 * for any errors that might have happened since we last
1929 		 * checked called fsync.
1930 		 */
1931 		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
1932 		goto out_release_extents;
1933 	}
1934 
1935 	/*
1936 	 * We use start here because we will need to wait on the IO to complete
1937 	 * in btrfs_sync_log, which could require joining a transaction (for
1938 	 * example checking cross references in the nocow path).  If we use join
1939 	 * here we could get into a situation where we're waiting on IO to
1940 	 * happen that is blocked on a transaction trying to commit.  With start
1941 	 * we inc the extwriter counter, so we wait for all extwriters to exit
1942 	 * before we start blocking joiners.  This comment is to keep somebody
1943 	 * from thinking they are super smart and changing this to
1944 	 * btrfs_join_transaction *cough*Josef*cough*.
1945 	 */
1946 	trans = btrfs_start_transaction(root, 0);
1947 	if (IS_ERR(trans)) {
1948 		ret = PTR_ERR(trans);
1949 		goto out_release_extents;
1950 	}
1951 	trans->in_fsync = true;
1952 
1953 	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
1954 	btrfs_release_log_ctx_extents(&ctx);
1955 	if (ret < 0) {
1956 		/* Fallthrough and commit/free transaction. */
1957 		ret = BTRFS_LOG_FORCE_COMMIT;
1958 	}
1959 
1960 	/* we've logged all the items and now have a consistent
1961 	 * version of the file in the log.  It is possible that
1962 	 * someone will come in and modify the file, but that's
1963 	 * fine because the log is consistent on disk, and we
1964 	 * have references to all of the file's extents
1965 	 *
1966 	 * It is possible that someone will come in and log the
1967 	 * file again, but that will end up using the synchronization
1968 	 * inside btrfs_sync_log to keep things safe.
1969 	 */
1970 	if (skip_ilock)
1971 		up_write(&BTRFS_I(inode)->i_mmap_lock);
1972 	else
1973 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1974 
1975 	if (ret == BTRFS_NO_LOG_SYNC) {
1976 		ret = btrfs_end_transaction(trans);
1977 		goto out;
1978 	}
1979 
1980 	/* We successfully logged the inode, attempt to sync the log. */
1981 	if (!ret) {
1982 		ret = btrfs_sync_log(trans, root, &ctx);
1983 		if (!ret) {
1984 			ret = btrfs_end_transaction(trans);
1985 			goto out;
1986 		}
1987 	}
1988 
1989 	/*
1990 	 * At this point we need to commit the transaction because we had
1991 	 * btrfs_need_log_full_commit() or some other error.
1992 	 *
1993 	 * If we didn't do a full sync we have to stop the trans handle, wait on
1994 	 * the ordered extents, start it again and commit the transaction.  If
1995 	 * we attempt to wait on the ordered extents here we could deadlock with
1996 	 * something like fallocate() that is holding the extent lock trying to
1997 	 * start a transaction while some other thread is trying to commit the
1998 	 * transaction while we (fsync) are currently holding the transaction
1999 	 * open.
2000 	 */
2001 	if (!full_sync) {
2002 		ret = btrfs_end_transaction(trans);
2003 		if (ret)
2004 			goto out;
2005 		ret = btrfs_wait_ordered_range(inode, start, len);
2006 		if (ret)
2007 			goto out;
2008 
2009 		/*
2010 		 * This is safe to use here because we're only interested in
2011 		 * making sure the transaction that had the ordered extents is
2012 		 * committed.  We aren't waiting on anything past this point,
2013 		 * we're purely getting the transaction and committing it.
2014 		 */
2015 		trans = btrfs_attach_transaction_barrier(root);
2016 		if (IS_ERR(trans)) {
2017 			ret = PTR_ERR(trans);
2018 
2019 			/*
2020 			 * We committed the transaction and there's no currently
2021 			 * running transaction, this means everything we care
2022 			 * about made it to disk and we are done.
2023 			 */
2024 			if (ret == -ENOENT)
2025 				ret = 0;
2026 			goto out;
2027 		}
2028 	}
2029 
2030 	ret = btrfs_commit_transaction(trans);
2031 out:
2032 	ASSERT(list_empty(&ctx.list));
2033 	ASSERT(list_empty(&ctx.conflict_inodes));
2034 	err = file_check_and_advance_wb_err(file);
2035 	if (!ret)
2036 		ret = err;
2037 	return ret > 0 ? -EIO : ret;
2038 
2039 out_release_extents:
2040 	btrfs_release_log_ctx_extents(&ctx);
2041 	if (skip_ilock)
2042 		up_write(&BTRFS_I(inode)->i_mmap_lock);
2043 	else
2044 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2045 	goto out;
2046 }
2047 
2048 static const struct vm_operations_struct btrfs_file_vm_ops = {
2049 	.fault		= filemap_fault,
2050 	.map_pages	= filemap_map_pages,
2051 	.page_mkwrite	= btrfs_page_mkwrite,
2052 };
2053 
2054 static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
2055 {
2056 	struct address_space *mapping = filp->f_mapping;
2057 
2058 	if (!mapping->a_ops->read_folio)
2059 		return -ENOEXEC;
2060 
2061 	file_accessed(filp);
2062 	vma->vm_ops = &btrfs_file_vm_ops;
2063 
2064 	return 0;
2065 }
2066 
2067 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2068 			  int slot, u64 start, u64 end)
2069 {
2070 	struct btrfs_file_extent_item *fi;
2071 	struct btrfs_key key;
2072 
2073 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2074 		return 0;
2075 
2076 	btrfs_item_key_to_cpu(leaf, &key, slot);
2077 	if (key.objectid != btrfs_ino(inode) ||
2078 	    key.type != BTRFS_EXTENT_DATA_KEY)
2079 		return 0;
2080 
2081 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2082 
2083 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2084 		return 0;
2085 
2086 	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2087 		return 0;
2088 
2089 	if (key.offset == end)
2090 		return 1;
2091 	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2092 		return 1;
2093 	return 0;
2094 }
2095 
2096 static int fill_holes(struct btrfs_trans_handle *trans,
2097 		struct btrfs_inode *inode,
2098 		struct btrfs_path *path, u64 offset, u64 end)
2099 {
2100 	struct btrfs_fs_info *fs_info = trans->fs_info;
2101 	struct btrfs_root *root = inode->root;
2102 	struct extent_buffer *leaf;
2103 	struct btrfs_file_extent_item *fi;
2104 	struct extent_map *hole_em;
2105 	struct btrfs_key key;
2106 	int ret;
2107 
2108 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2109 		goto out;
2110 
2111 	key.objectid = btrfs_ino(inode);
2112 	key.type = BTRFS_EXTENT_DATA_KEY;
2113 	key.offset = offset;
2114 
2115 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2116 	if (ret <= 0) {
2117 		/*
2118 		 * We should have dropped this offset, so if we find it then
2119 		 * something has gone horribly wrong.
2120 		 */
2121 		if (ret == 0)
2122 			ret = -EINVAL;
2123 		return ret;
2124 	}
2125 
2126 	leaf = path->nodes[0];
2127 	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2128 		u64 num_bytes;
2129 
2130 		path->slots[0]--;
2131 		fi = btrfs_item_ptr(leaf, path->slots[0],
2132 				    struct btrfs_file_extent_item);
2133 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2134 			end - offset;
2135 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2136 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2137 		btrfs_set_file_extent_offset(leaf, fi, 0);
2138 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2139 		btrfs_mark_buffer_dirty(trans, leaf);
2140 		goto out;
2141 	}
2142 
2143 	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2144 		u64 num_bytes;
2145 
2146 		key.offset = offset;
2147 		btrfs_set_item_key_safe(trans, path, &key);
2148 		fi = btrfs_item_ptr(leaf, path->slots[0],
2149 				    struct btrfs_file_extent_item);
2150 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2151 			offset;
2152 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2153 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2154 		btrfs_set_file_extent_offset(leaf, fi, 0);
2155 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2156 		btrfs_mark_buffer_dirty(trans, leaf);
2157 		goto out;
2158 	}
2159 	btrfs_release_path(path);
2160 
2161 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2162 				       end - offset);
2163 	if (ret)
2164 		return ret;
2165 
2166 out:
2167 	btrfs_release_path(path);
2168 
2169 	hole_em = alloc_extent_map();
2170 	if (!hole_em) {
2171 		btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2172 		btrfs_set_inode_full_sync(inode);
2173 	} else {
2174 		hole_em->start = offset;
2175 		hole_em->len = end - offset;
2176 		hole_em->ram_bytes = hole_em->len;
2177 		hole_em->orig_start = offset;
2178 
2179 		hole_em->block_start = EXTENT_MAP_HOLE;
2180 		hole_em->block_len = 0;
2181 		hole_em->orig_block_len = 0;
2182 		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2183 		hole_em->generation = trans->transid;
2184 
2185 		ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2186 		free_extent_map(hole_em);
2187 		if (ret)
2188 			btrfs_set_inode_full_sync(inode);
2189 	}
2190 
2191 	return 0;
2192 }
2193 
2194 /*
2195  * Find a hole extent on given inode and change start/len to the end of hole
2196  * extent.(hole/vacuum extent whose em->start <= start &&
2197  *	   em->start + em->len > start)
2198  * When a hole extent is found, return 1 and modify start/len.
2199  */
2200 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2201 {
2202 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2203 	struct extent_map *em;
2204 	int ret = 0;
2205 
2206 	em = btrfs_get_extent(inode, NULL, 0,
2207 			      round_down(*start, fs_info->sectorsize),
2208 			      round_up(*len, fs_info->sectorsize));
2209 	if (IS_ERR(em))
2210 		return PTR_ERR(em);
2211 
2212 	/* Hole or vacuum extent(only exists in no-hole mode) */
2213 	if (em->block_start == EXTENT_MAP_HOLE) {
2214 		ret = 1;
2215 		*len = em->start + em->len > *start + *len ?
2216 		       0 : *start + *len - em->start - em->len;
2217 		*start = em->start + em->len;
2218 	}
2219 	free_extent_map(em);
2220 	return ret;
2221 }
2222 
2223 static void btrfs_punch_hole_lock_range(struct inode *inode,
2224 					const u64 lockstart,
2225 					const u64 lockend,
2226 					struct extent_state **cached_state)
2227 {
2228 	/*
2229 	 * For subpage case, if the range is not at page boundary, we could
2230 	 * have pages at the leading/tailing part of the range.
2231 	 * This could lead to dead loop since filemap_range_has_page()
2232 	 * will always return true.
2233 	 * So here we need to do extra page alignment for
2234 	 * filemap_range_has_page().
2235 	 */
2236 	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2237 	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2238 
2239 	while (1) {
2240 		truncate_pagecache_range(inode, lockstart, lockend);
2241 
2242 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2243 			    cached_state);
2244 		/*
2245 		 * We can't have ordered extents in the range, nor dirty/writeback
2246 		 * pages, because we have locked the inode's VFS lock in exclusive
2247 		 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2248 		 * we have flushed all delalloc in the range and we have waited
2249 		 * for any ordered extents in the range to complete.
2250 		 * We can race with anyone reading pages from this range, so after
2251 		 * locking the range check if we have pages in the range, and if
2252 		 * we do, unlock the range and retry.
2253 		 */
2254 		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2255 					    page_lockend))
2256 			break;
2257 
2258 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2259 			      cached_state);
2260 	}
2261 
2262 	btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2263 }
2264 
2265 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2266 				     struct btrfs_inode *inode,
2267 				     struct btrfs_path *path,
2268 				     struct btrfs_replace_extent_info *extent_info,
2269 				     const u64 replace_len,
2270 				     const u64 bytes_to_drop)
2271 {
2272 	struct btrfs_fs_info *fs_info = trans->fs_info;
2273 	struct btrfs_root *root = inode->root;
2274 	struct btrfs_file_extent_item *extent;
2275 	struct extent_buffer *leaf;
2276 	struct btrfs_key key;
2277 	int slot;
2278 	struct btrfs_ref ref = { 0 };
2279 	int ret;
2280 
2281 	if (replace_len == 0)
2282 		return 0;
2283 
2284 	if (extent_info->disk_offset == 0 &&
2285 	    btrfs_fs_incompat(fs_info, NO_HOLES)) {
2286 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2287 		return 0;
2288 	}
2289 
2290 	key.objectid = btrfs_ino(inode);
2291 	key.type = BTRFS_EXTENT_DATA_KEY;
2292 	key.offset = extent_info->file_offset;
2293 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2294 				      sizeof(struct btrfs_file_extent_item));
2295 	if (ret)
2296 		return ret;
2297 	leaf = path->nodes[0];
2298 	slot = path->slots[0];
2299 	write_extent_buffer(leaf, extent_info->extent_buf,
2300 			    btrfs_item_ptr_offset(leaf, slot),
2301 			    sizeof(struct btrfs_file_extent_item));
2302 	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2303 	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2304 	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2305 	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2306 	if (extent_info->is_new_extent)
2307 		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2308 	btrfs_mark_buffer_dirty(trans, leaf);
2309 	btrfs_release_path(path);
2310 
2311 	ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2312 						replace_len);
2313 	if (ret)
2314 		return ret;
2315 
2316 	/* If it's a hole, nothing more needs to be done. */
2317 	if (extent_info->disk_offset == 0) {
2318 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2319 		return 0;
2320 	}
2321 
2322 	btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2323 
2324 	if (extent_info->is_new_extent && extent_info->insertions == 0) {
2325 		key.objectid = extent_info->disk_offset;
2326 		key.type = BTRFS_EXTENT_ITEM_KEY;
2327 		key.offset = extent_info->disk_len;
2328 		ret = btrfs_alloc_reserved_file_extent(trans, root,
2329 						       btrfs_ino(inode),
2330 						       extent_info->file_offset,
2331 						       extent_info->qgroup_reserved,
2332 						       &key);
2333 	} else {
2334 		u64 ref_offset;
2335 
2336 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2337 				       extent_info->disk_offset,
2338 				       extent_info->disk_len, 0);
2339 		ref_offset = extent_info->file_offset - extent_info->data_offset;
2340 		btrfs_init_data_ref(&ref, root->root_key.objectid,
2341 				    btrfs_ino(inode), ref_offset, 0, false);
2342 		ret = btrfs_inc_extent_ref(trans, &ref);
2343 	}
2344 
2345 	extent_info->insertions++;
2346 
2347 	return ret;
2348 }
2349 
2350 /*
2351  * The respective range must have been previously locked, as well as the inode.
2352  * The end offset is inclusive (last byte of the range).
2353  * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2354  * the file range with an extent.
2355  * When not punching a hole, we don't want to end up in a state where we dropped
2356  * extents without inserting a new one, so we must abort the transaction to avoid
2357  * a corruption.
2358  */
2359 int btrfs_replace_file_extents(struct btrfs_inode *inode,
2360 			       struct btrfs_path *path, const u64 start,
2361 			       const u64 end,
2362 			       struct btrfs_replace_extent_info *extent_info,
2363 			       struct btrfs_trans_handle **trans_out)
2364 {
2365 	struct btrfs_drop_extents_args drop_args = { 0 };
2366 	struct btrfs_root *root = inode->root;
2367 	struct btrfs_fs_info *fs_info = root->fs_info;
2368 	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2369 	u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2370 	struct btrfs_trans_handle *trans = NULL;
2371 	struct btrfs_block_rsv *rsv;
2372 	unsigned int rsv_count;
2373 	u64 cur_offset;
2374 	u64 len = end - start;
2375 	int ret = 0;
2376 
2377 	if (end <= start)
2378 		return -EINVAL;
2379 
2380 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2381 	if (!rsv) {
2382 		ret = -ENOMEM;
2383 		goto out;
2384 	}
2385 	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2386 	rsv->failfast = true;
2387 
2388 	/*
2389 	 * 1 - update the inode
2390 	 * 1 - removing the extents in the range
2391 	 * 1 - adding the hole extent if no_holes isn't set or if we are
2392 	 *     replacing the range with a new extent
2393 	 */
2394 	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2395 		rsv_count = 3;
2396 	else
2397 		rsv_count = 2;
2398 
2399 	trans = btrfs_start_transaction(root, rsv_count);
2400 	if (IS_ERR(trans)) {
2401 		ret = PTR_ERR(trans);
2402 		trans = NULL;
2403 		goto out_free;
2404 	}
2405 
2406 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2407 				      min_size, false);
2408 	if (WARN_ON(ret))
2409 		goto out_trans;
2410 	trans->block_rsv = rsv;
2411 
2412 	cur_offset = start;
2413 	drop_args.path = path;
2414 	drop_args.end = end + 1;
2415 	drop_args.drop_cache = true;
2416 	while (cur_offset < end) {
2417 		drop_args.start = cur_offset;
2418 		ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2419 		/* If we are punching a hole decrement the inode's byte count */
2420 		if (!extent_info)
2421 			btrfs_update_inode_bytes(inode, 0,
2422 						 drop_args.bytes_found);
2423 		if (ret != -ENOSPC) {
2424 			/*
2425 			 * The only time we don't want to abort is if we are
2426 			 * attempting to clone a partial inline extent, in which
2427 			 * case we'll get EOPNOTSUPP.  However if we aren't
2428 			 * clone we need to abort no matter what, because if we
2429 			 * got EOPNOTSUPP via prealloc then we messed up and
2430 			 * need to abort.
2431 			 */
2432 			if (ret &&
2433 			    (ret != -EOPNOTSUPP ||
2434 			     (extent_info && extent_info->is_new_extent)))
2435 				btrfs_abort_transaction(trans, ret);
2436 			break;
2437 		}
2438 
2439 		trans->block_rsv = &fs_info->trans_block_rsv;
2440 
2441 		if (!extent_info && cur_offset < drop_args.drop_end &&
2442 		    cur_offset < ino_size) {
2443 			ret = fill_holes(trans, inode, path, cur_offset,
2444 					 drop_args.drop_end);
2445 			if (ret) {
2446 				/*
2447 				 * If we failed then we didn't insert our hole
2448 				 * entries for the area we dropped, so now the
2449 				 * fs is corrupted, so we must abort the
2450 				 * transaction.
2451 				 */
2452 				btrfs_abort_transaction(trans, ret);
2453 				break;
2454 			}
2455 		} else if (!extent_info && cur_offset < drop_args.drop_end) {
2456 			/*
2457 			 * We are past the i_size here, but since we didn't
2458 			 * insert holes we need to clear the mapped area so we
2459 			 * know to not set disk_i_size in this area until a new
2460 			 * file extent is inserted here.
2461 			 */
2462 			ret = btrfs_inode_clear_file_extent_range(inode,
2463 					cur_offset,
2464 					drop_args.drop_end - cur_offset);
2465 			if (ret) {
2466 				/*
2467 				 * We couldn't clear our area, so we could
2468 				 * presumably adjust up and corrupt the fs, so
2469 				 * we need to abort.
2470 				 */
2471 				btrfs_abort_transaction(trans, ret);
2472 				break;
2473 			}
2474 		}
2475 
2476 		if (extent_info &&
2477 		    drop_args.drop_end > extent_info->file_offset) {
2478 			u64 replace_len = drop_args.drop_end -
2479 					  extent_info->file_offset;
2480 
2481 			ret = btrfs_insert_replace_extent(trans, inode,	path,
2482 					extent_info, replace_len,
2483 					drop_args.bytes_found);
2484 			if (ret) {
2485 				btrfs_abort_transaction(trans, ret);
2486 				break;
2487 			}
2488 			extent_info->data_len -= replace_len;
2489 			extent_info->data_offset += replace_len;
2490 			extent_info->file_offset += replace_len;
2491 		}
2492 
2493 		/*
2494 		 * We are releasing our handle on the transaction, balance the
2495 		 * dirty pages of the btree inode and flush delayed items, and
2496 		 * then get a new transaction handle, which may now point to a
2497 		 * new transaction in case someone else may have committed the
2498 		 * transaction we used to replace/drop file extent items. So
2499 		 * bump the inode's iversion and update mtime and ctime except
2500 		 * if we are called from a dedupe context. This is because a
2501 		 * power failure/crash may happen after the transaction is
2502 		 * committed and before we finish replacing/dropping all the
2503 		 * file extent items we need.
2504 		 */
2505 		inode_inc_iversion(&inode->vfs_inode);
2506 
2507 		if (!extent_info || extent_info->update_times)
2508 			inode->vfs_inode.i_mtime = inode_set_ctime_current(&inode->vfs_inode);
2509 
2510 		ret = btrfs_update_inode(trans, root, inode);
2511 		if (ret)
2512 			break;
2513 
2514 		btrfs_end_transaction(trans);
2515 		btrfs_btree_balance_dirty(fs_info);
2516 
2517 		trans = btrfs_start_transaction(root, rsv_count);
2518 		if (IS_ERR(trans)) {
2519 			ret = PTR_ERR(trans);
2520 			trans = NULL;
2521 			break;
2522 		}
2523 
2524 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2525 					      rsv, min_size, false);
2526 		if (WARN_ON(ret))
2527 			break;
2528 		trans->block_rsv = rsv;
2529 
2530 		cur_offset = drop_args.drop_end;
2531 		len = end - cur_offset;
2532 		if (!extent_info && len) {
2533 			ret = find_first_non_hole(inode, &cur_offset, &len);
2534 			if (unlikely(ret < 0))
2535 				break;
2536 			if (ret && !len) {
2537 				ret = 0;
2538 				break;
2539 			}
2540 		}
2541 	}
2542 
2543 	/*
2544 	 * If we were cloning, force the next fsync to be a full one since we
2545 	 * we replaced (or just dropped in the case of cloning holes when
2546 	 * NO_HOLES is enabled) file extent items and did not setup new extent
2547 	 * maps for the replacement extents (or holes).
2548 	 */
2549 	if (extent_info && !extent_info->is_new_extent)
2550 		btrfs_set_inode_full_sync(inode);
2551 
2552 	if (ret)
2553 		goto out_trans;
2554 
2555 	trans->block_rsv = &fs_info->trans_block_rsv;
2556 	/*
2557 	 * If we are using the NO_HOLES feature we might have had already an
2558 	 * hole that overlaps a part of the region [lockstart, lockend] and
2559 	 * ends at (or beyond) lockend. Since we have no file extent items to
2560 	 * represent holes, drop_end can be less than lockend and so we must
2561 	 * make sure we have an extent map representing the existing hole (the
2562 	 * call to __btrfs_drop_extents() might have dropped the existing extent
2563 	 * map representing the existing hole), otherwise the fast fsync path
2564 	 * will not record the existence of the hole region
2565 	 * [existing_hole_start, lockend].
2566 	 */
2567 	if (drop_args.drop_end <= end)
2568 		drop_args.drop_end = end + 1;
2569 	/*
2570 	 * Don't insert file hole extent item if it's for a range beyond eof
2571 	 * (because it's useless) or if it represents a 0 bytes range (when
2572 	 * cur_offset == drop_end).
2573 	 */
2574 	if (!extent_info && cur_offset < ino_size &&
2575 	    cur_offset < drop_args.drop_end) {
2576 		ret = fill_holes(trans, inode, path, cur_offset,
2577 				 drop_args.drop_end);
2578 		if (ret) {
2579 			/* Same comment as above. */
2580 			btrfs_abort_transaction(trans, ret);
2581 			goto out_trans;
2582 		}
2583 	} else if (!extent_info && cur_offset < drop_args.drop_end) {
2584 		/* See the comment in the loop above for the reasoning here. */
2585 		ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2586 					drop_args.drop_end - cur_offset);
2587 		if (ret) {
2588 			btrfs_abort_transaction(trans, ret);
2589 			goto out_trans;
2590 		}
2591 
2592 	}
2593 	if (extent_info) {
2594 		ret = btrfs_insert_replace_extent(trans, inode, path,
2595 				extent_info, extent_info->data_len,
2596 				drop_args.bytes_found);
2597 		if (ret) {
2598 			btrfs_abort_transaction(trans, ret);
2599 			goto out_trans;
2600 		}
2601 	}
2602 
2603 out_trans:
2604 	if (!trans)
2605 		goto out_free;
2606 
2607 	trans->block_rsv = &fs_info->trans_block_rsv;
2608 	if (ret)
2609 		btrfs_end_transaction(trans);
2610 	else
2611 		*trans_out = trans;
2612 out_free:
2613 	btrfs_free_block_rsv(fs_info, rsv);
2614 out:
2615 	return ret;
2616 }
2617 
2618 static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2619 {
2620 	struct inode *inode = file_inode(file);
2621 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2622 	struct btrfs_root *root = BTRFS_I(inode)->root;
2623 	struct extent_state *cached_state = NULL;
2624 	struct btrfs_path *path;
2625 	struct btrfs_trans_handle *trans = NULL;
2626 	u64 lockstart;
2627 	u64 lockend;
2628 	u64 tail_start;
2629 	u64 tail_len;
2630 	u64 orig_start = offset;
2631 	int ret = 0;
2632 	bool same_block;
2633 	u64 ino_size;
2634 	bool truncated_block = false;
2635 	bool updated_inode = false;
2636 
2637 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2638 
2639 	ret = btrfs_wait_ordered_range(inode, offset, len);
2640 	if (ret)
2641 		goto out_only_mutex;
2642 
2643 	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2644 	ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2645 	if (ret < 0)
2646 		goto out_only_mutex;
2647 	if (ret && !len) {
2648 		/* Already in a large hole */
2649 		ret = 0;
2650 		goto out_only_mutex;
2651 	}
2652 
2653 	ret = file_modified(file);
2654 	if (ret)
2655 		goto out_only_mutex;
2656 
2657 	lockstart = round_up(offset, fs_info->sectorsize);
2658 	lockend = round_down(offset + len, fs_info->sectorsize) - 1;
2659 	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2660 		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2661 	/*
2662 	 * We needn't truncate any block which is beyond the end of the file
2663 	 * because we are sure there is no data there.
2664 	 */
2665 	/*
2666 	 * Only do this if we are in the same block and we aren't doing the
2667 	 * entire block.
2668 	 */
2669 	if (same_block && len < fs_info->sectorsize) {
2670 		if (offset < ino_size) {
2671 			truncated_block = true;
2672 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2673 						   0);
2674 		} else {
2675 			ret = 0;
2676 		}
2677 		goto out_only_mutex;
2678 	}
2679 
2680 	/* zero back part of the first block */
2681 	if (offset < ino_size) {
2682 		truncated_block = true;
2683 		ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2684 		if (ret) {
2685 			btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2686 			return ret;
2687 		}
2688 	}
2689 
2690 	/* Check the aligned pages after the first unaligned page,
2691 	 * if offset != orig_start, which means the first unaligned page
2692 	 * including several following pages are already in holes,
2693 	 * the extra check can be skipped */
2694 	if (offset == orig_start) {
2695 		/* after truncate page, check hole again */
2696 		len = offset + len - lockstart;
2697 		offset = lockstart;
2698 		ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2699 		if (ret < 0)
2700 			goto out_only_mutex;
2701 		if (ret && !len) {
2702 			ret = 0;
2703 			goto out_only_mutex;
2704 		}
2705 		lockstart = offset;
2706 	}
2707 
2708 	/* Check the tail unaligned part is in a hole */
2709 	tail_start = lockend + 1;
2710 	tail_len = offset + len - tail_start;
2711 	if (tail_len) {
2712 		ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2713 		if (unlikely(ret < 0))
2714 			goto out_only_mutex;
2715 		if (!ret) {
2716 			/* zero the front end of the last page */
2717 			if (tail_start + tail_len < ino_size) {
2718 				truncated_block = true;
2719 				ret = btrfs_truncate_block(BTRFS_I(inode),
2720 							tail_start + tail_len,
2721 							0, 1);
2722 				if (ret)
2723 					goto out_only_mutex;
2724 			}
2725 		}
2726 	}
2727 
2728 	if (lockend < lockstart) {
2729 		ret = 0;
2730 		goto out_only_mutex;
2731 	}
2732 
2733 	btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
2734 
2735 	path = btrfs_alloc_path();
2736 	if (!path) {
2737 		ret = -ENOMEM;
2738 		goto out;
2739 	}
2740 
2741 	ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2742 					 lockend, NULL, &trans);
2743 	btrfs_free_path(path);
2744 	if (ret)
2745 		goto out;
2746 
2747 	ASSERT(trans != NULL);
2748 	inode_inc_iversion(inode);
2749 	inode->i_mtime = inode_set_ctime_current(inode);
2750 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2751 	updated_inode = true;
2752 	btrfs_end_transaction(trans);
2753 	btrfs_btree_balance_dirty(fs_info);
2754 out:
2755 	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2756 		      &cached_state);
2757 out_only_mutex:
2758 	if (!updated_inode && truncated_block && !ret) {
2759 		/*
2760 		 * If we only end up zeroing part of a page, we still need to
2761 		 * update the inode item, so that all the time fields are
2762 		 * updated as well as the necessary btrfs inode in memory fields
2763 		 * for detecting, at fsync time, if the inode isn't yet in the
2764 		 * log tree or it's there but not up to date.
2765 		 */
2766 		struct timespec64 now = inode_set_ctime_current(inode);
2767 
2768 		inode_inc_iversion(inode);
2769 		inode->i_mtime = now;
2770 		trans = btrfs_start_transaction(root, 1);
2771 		if (IS_ERR(trans)) {
2772 			ret = PTR_ERR(trans);
2773 		} else {
2774 			int ret2;
2775 
2776 			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2777 			ret2 = btrfs_end_transaction(trans);
2778 			if (!ret)
2779 				ret = ret2;
2780 		}
2781 	}
2782 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2783 	return ret;
2784 }
2785 
2786 /* Helper structure to record which range is already reserved */
2787 struct falloc_range {
2788 	struct list_head list;
2789 	u64 start;
2790 	u64 len;
2791 };
2792 
2793 /*
2794  * Helper function to add falloc range
2795  *
2796  * Caller should have locked the larger range of extent containing
2797  * [start, len)
2798  */
2799 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2800 {
2801 	struct falloc_range *range = NULL;
2802 
2803 	if (!list_empty(head)) {
2804 		/*
2805 		 * As fallocate iterates by bytenr order, we only need to check
2806 		 * the last range.
2807 		 */
2808 		range = list_last_entry(head, struct falloc_range, list);
2809 		if (range->start + range->len == start) {
2810 			range->len += len;
2811 			return 0;
2812 		}
2813 	}
2814 
2815 	range = kmalloc(sizeof(*range), GFP_KERNEL);
2816 	if (!range)
2817 		return -ENOMEM;
2818 	range->start = start;
2819 	range->len = len;
2820 	list_add_tail(&range->list, head);
2821 	return 0;
2822 }
2823 
2824 static int btrfs_fallocate_update_isize(struct inode *inode,
2825 					const u64 end,
2826 					const int mode)
2827 {
2828 	struct btrfs_trans_handle *trans;
2829 	struct btrfs_root *root = BTRFS_I(inode)->root;
2830 	int ret;
2831 	int ret2;
2832 
2833 	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2834 		return 0;
2835 
2836 	trans = btrfs_start_transaction(root, 1);
2837 	if (IS_ERR(trans))
2838 		return PTR_ERR(trans);
2839 
2840 	inode_set_ctime_current(inode);
2841 	i_size_write(inode, end);
2842 	btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
2843 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2844 	ret2 = btrfs_end_transaction(trans);
2845 
2846 	return ret ? ret : ret2;
2847 }
2848 
2849 enum {
2850 	RANGE_BOUNDARY_WRITTEN_EXTENT,
2851 	RANGE_BOUNDARY_PREALLOC_EXTENT,
2852 	RANGE_BOUNDARY_HOLE,
2853 };
2854 
2855 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
2856 						 u64 offset)
2857 {
2858 	const u64 sectorsize = inode->root->fs_info->sectorsize;
2859 	struct extent_map *em;
2860 	int ret;
2861 
2862 	offset = round_down(offset, sectorsize);
2863 	em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
2864 	if (IS_ERR(em))
2865 		return PTR_ERR(em);
2866 
2867 	if (em->block_start == EXTENT_MAP_HOLE)
2868 		ret = RANGE_BOUNDARY_HOLE;
2869 	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2870 		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2871 	else
2872 		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2873 
2874 	free_extent_map(em);
2875 	return ret;
2876 }
2877 
2878 static int btrfs_zero_range(struct inode *inode,
2879 			    loff_t offset,
2880 			    loff_t len,
2881 			    const int mode)
2882 {
2883 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2884 	struct extent_map *em;
2885 	struct extent_changeset *data_reserved = NULL;
2886 	int ret;
2887 	u64 alloc_hint = 0;
2888 	const u64 sectorsize = fs_info->sectorsize;
2889 	u64 alloc_start = round_down(offset, sectorsize);
2890 	u64 alloc_end = round_up(offset + len, sectorsize);
2891 	u64 bytes_to_reserve = 0;
2892 	bool space_reserved = false;
2893 
2894 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2895 			      alloc_end - alloc_start);
2896 	if (IS_ERR(em)) {
2897 		ret = PTR_ERR(em);
2898 		goto out;
2899 	}
2900 
2901 	/*
2902 	 * Avoid hole punching and extent allocation for some cases. More cases
2903 	 * could be considered, but these are unlikely common and we keep things
2904 	 * as simple as possible for now. Also, intentionally, if the target
2905 	 * range contains one or more prealloc extents together with regular
2906 	 * extents and holes, we drop all the existing extents and allocate a
2907 	 * new prealloc extent, so that we get a larger contiguous disk extent.
2908 	 */
2909 	if (em->start <= alloc_start &&
2910 	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2911 		const u64 em_end = em->start + em->len;
2912 
2913 		if (em_end >= offset + len) {
2914 			/*
2915 			 * The whole range is already a prealloc extent,
2916 			 * do nothing except updating the inode's i_size if
2917 			 * needed.
2918 			 */
2919 			free_extent_map(em);
2920 			ret = btrfs_fallocate_update_isize(inode, offset + len,
2921 							   mode);
2922 			goto out;
2923 		}
2924 		/*
2925 		 * Part of the range is already a prealloc extent, so operate
2926 		 * only on the remaining part of the range.
2927 		 */
2928 		alloc_start = em_end;
2929 		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2930 		len = offset + len - alloc_start;
2931 		offset = alloc_start;
2932 		alloc_hint = em->block_start + em->len;
2933 	}
2934 	free_extent_map(em);
2935 
2936 	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2937 	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2938 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2939 				      sectorsize);
2940 		if (IS_ERR(em)) {
2941 			ret = PTR_ERR(em);
2942 			goto out;
2943 		}
2944 
2945 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2946 			free_extent_map(em);
2947 			ret = btrfs_fallocate_update_isize(inode, offset + len,
2948 							   mode);
2949 			goto out;
2950 		}
2951 		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
2952 			free_extent_map(em);
2953 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2954 						   0);
2955 			if (!ret)
2956 				ret = btrfs_fallocate_update_isize(inode,
2957 								   offset + len,
2958 								   mode);
2959 			return ret;
2960 		}
2961 		free_extent_map(em);
2962 		alloc_start = round_down(offset, sectorsize);
2963 		alloc_end = alloc_start + sectorsize;
2964 		goto reserve_space;
2965 	}
2966 
2967 	alloc_start = round_up(offset, sectorsize);
2968 	alloc_end = round_down(offset + len, sectorsize);
2969 
2970 	/*
2971 	 * For unaligned ranges, check the pages at the boundaries, they might
2972 	 * map to an extent, in which case we need to partially zero them, or
2973 	 * they might map to a hole, in which case we need our allocation range
2974 	 * to cover them.
2975 	 */
2976 	if (!IS_ALIGNED(offset, sectorsize)) {
2977 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2978 							    offset);
2979 		if (ret < 0)
2980 			goto out;
2981 		if (ret == RANGE_BOUNDARY_HOLE) {
2982 			alloc_start = round_down(offset, sectorsize);
2983 			ret = 0;
2984 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2985 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2986 			if (ret)
2987 				goto out;
2988 		} else {
2989 			ret = 0;
2990 		}
2991 	}
2992 
2993 	if (!IS_ALIGNED(offset + len, sectorsize)) {
2994 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2995 							    offset + len);
2996 		if (ret < 0)
2997 			goto out;
2998 		if (ret == RANGE_BOUNDARY_HOLE) {
2999 			alloc_end = round_up(offset + len, sectorsize);
3000 			ret = 0;
3001 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
3002 			ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
3003 						   0, 1);
3004 			if (ret)
3005 				goto out;
3006 		} else {
3007 			ret = 0;
3008 		}
3009 	}
3010 
3011 reserve_space:
3012 	if (alloc_start < alloc_end) {
3013 		struct extent_state *cached_state = NULL;
3014 		const u64 lockstart = alloc_start;
3015 		const u64 lockend = alloc_end - 1;
3016 
3017 		bytes_to_reserve = alloc_end - alloc_start;
3018 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3019 						      bytes_to_reserve);
3020 		if (ret < 0)
3021 			goto out;
3022 		space_reserved = true;
3023 		btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3024 					    &cached_state);
3025 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3026 						alloc_start, bytes_to_reserve);
3027 		if (ret) {
3028 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
3029 				      lockend, &cached_state);
3030 			goto out;
3031 		}
3032 		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3033 						alloc_end - alloc_start,
3034 						i_blocksize(inode),
3035 						offset + len, &alloc_hint);
3036 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3037 			      &cached_state);
3038 		/* btrfs_prealloc_file_range releases reserved space on error */
3039 		if (ret) {
3040 			space_reserved = false;
3041 			goto out;
3042 		}
3043 	}
3044 	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3045  out:
3046 	if (ret && space_reserved)
3047 		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3048 					       alloc_start, bytes_to_reserve);
3049 	extent_changeset_free(data_reserved);
3050 
3051 	return ret;
3052 }
3053 
3054 static long btrfs_fallocate(struct file *file, int mode,
3055 			    loff_t offset, loff_t len)
3056 {
3057 	struct inode *inode = file_inode(file);
3058 	struct extent_state *cached_state = NULL;
3059 	struct extent_changeset *data_reserved = NULL;
3060 	struct falloc_range *range;
3061 	struct falloc_range *tmp;
3062 	LIST_HEAD(reserve_list);
3063 	u64 cur_offset;
3064 	u64 last_byte;
3065 	u64 alloc_start;
3066 	u64 alloc_end;
3067 	u64 alloc_hint = 0;
3068 	u64 locked_end;
3069 	u64 actual_end = 0;
3070 	u64 data_space_needed = 0;
3071 	u64 data_space_reserved = 0;
3072 	u64 qgroup_reserved = 0;
3073 	struct extent_map *em;
3074 	int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
3075 	int ret;
3076 
3077 	/* Do not allow fallocate in ZONED mode */
3078 	if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3079 		return -EOPNOTSUPP;
3080 
3081 	alloc_start = round_down(offset, blocksize);
3082 	alloc_end = round_up(offset + len, blocksize);
3083 	cur_offset = alloc_start;
3084 
3085 	/* Make sure we aren't being give some crap mode */
3086 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3087 		     FALLOC_FL_ZERO_RANGE))
3088 		return -EOPNOTSUPP;
3089 
3090 	if (mode & FALLOC_FL_PUNCH_HOLE)
3091 		return btrfs_punch_hole(file, offset, len);
3092 
3093 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3094 
3095 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3096 		ret = inode_newsize_ok(inode, offset + len);
3097 		if (ret)
3098 			goto out;
3099 	}
3100 
3101 	ret = file_modified(file);
3102 	if (ret)
3103 		goto out;
3104 
3105 	/*
3106 	 * TODO: Move these two operations after we have checked
3107 	 * accurate reserved space, or fallocate can still fail but
3108 	 * with page truncated or size expanded.
3109 	 *
3110 	 * But that's a minor problem and won't do much harm BTW.
3111 	 */
3112 	if (alloc_start > inode->i_size) {
3113 		ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3114 					alloc_start);
3115 		if (ret)
3116 			goto out;
3117 	} else if (offset + len > inode->i_size) {
3118 		/*
3119 		 * If we are fallocating from the end of the file onward we
3120 		 * need to zero out the end of the block if i_size lands in the
3121 		 * middle of a block.
3122 		 */
3123 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3124 		if (ret)
3125 			goto out;
3126 	}
3127 
3128 	/*
3129 	 * We have locked the inode at the VFS level (in exclusive mode) and we
3130 	 * have locked the i_mmap_lock lock (in exclusive mode). Now before
3131 	 * locking the file range, flush all dealloc in the range and wait for
3132 	 * all ordered extents in the range to complete. After this we can lock
3133 	 * the file range and, due to the previous locking we did, we know there
3134 	 * can't be more delalloc or ordered extents in the range.
3135 	 */
3136 	ret = btrfs_wait_ordered_range(inode, alloc_start,
3137 				       alloc_end - alloc_start);
3138 	if (ret)
3139 		goto out;
3140 
3141 	if (mode & FALLOC_FL_ZERO_RANGE) {
3142 		ret = btrfs_zero_range(inode, offset, len, mode);
3143 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3144 		return ret;
3145 	}
3146 
3147 	locked_end = alloc_end - 1;
3148 	lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3149 		    &cached_state);
3150 
3151 	btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3152 
3153 	/* First, check if we exceed the qgroup limit */
3154 	while (cur_offset < alloc_end) {
3155 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3156 				      alloc_end - cur_offset);
3157 		if (IS_ERR(em)) {
3158 			ret = PTR_ERR(em);
3159 			break;
3160 		}
3161 		last_byte = min(extent_map_end(em), alloc_end);
3162 		actual_end = min_t(u64, extent_map_end(em), offset + len);
3163 		last_byte = ALIGN(last_byte, blocksize);
3164 		if (em->block_start == EXTENT_MAP_HOLE ||
3165 		    (cur_offset >= inode->i_size &&
3166 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3167 			const u64 range_len = last_byte - cur_offset;
3168 
3169 			ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3170 			if (ret < 0) {
3171 				free_extent_map(em);
3172 				break;
3173 			}
3174 			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3175 					&data_reserved, cur_offset, range_len);
3176 			if (ret < 0) {
3177 				free_extent_map(em);
3178 				break;
3179 			}
3180 			qgroup_reserved += range_len;
3181 			data_space_needed += range_len;
3182 		}
3183 		free_extent_map(em);
3184 		cur_offset = last_byte;
3185 	}
3186 
3187 	if (!ret && data_space_needed > 0) {
3188 		/*
3189 		 * We are safe to reserve space here as we can't have delalloc
3190 		 * in the range, see above.
3191 		 */
3192 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3193 						      data_space_needed);
3194 		if (!ret)
3195 			data_space_reserved = data_space_needed;
3196 	}
3197 
3198 	/*
3199 	 * If ret is still 0, means we're OK to fallocate.
3200 	 * Or just cleanup the list and exit.
3201 	 */
3202 	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3203 		if (!ret) {
3204 			ret = btrfs_prealloc_file_range(inode, mode,
3205 					range->start,
3206 					range->len, i_blocksize(inode),
3207 					offset + len, &alloc_hint);
3208 			/*
3209 			 * btrfs_prealloc_file_range() releases space even
3210 			 * if it returns an error.
3211 			 */
3212 			data_space_reserved -= range->len;
3213 			qgroup_reserved -= range->len;
3214 		} else if (data_space_reserved > 0) {
3215 			btrfs_free_reserved_data_space(BTRFS_I(inode),
3216 					       data_reserved, range->start,
3217 					       range->len);
3218 			data_space_reserved -= range->len;
3219 			qgroup_reserved -= range->len;
3220 		} else if (qgroup_reserved > 0) {
3221 			btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3222 					       range->start, range->len, NULL);
3223 			qgroup_reserved -= range->len;
3224 		}
3225 		list_del(&range->list);
3226 		kfree(range);
3227 	}
3228 	if (ret < 0)
3229 		goto out_unlock;
3230 
3231 	/*
3232 	 * We didn't need to allocate any more space, but we still extended the
3233 	 * size of the file so we need to update i_size and the inode item.
3234 	 */
3235 	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3236 out_unlock:
3237 	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3238 		      &cached_state);
3239 out:
3240 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3241 	extent_changeset_free(data_reserved);
3242 	return ret;
3243 }
3244 
3245 /*
3246  * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3247  * that has unflushed and/or flushing delalloc. There might be other adjacent
3248  * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
3249  * looping while it gets adjacent subranges, and merging them together.
3250  */
3251 static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3252 				   struct extent_state **cached_state,
3253 				   bool *search_io_tree,
3254 				   u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3255 {
3256 	u64 len = end + 1 - start;
3257 	u64 delalloc_len = 0;
3258 	struct btrfs_ordered_extent *oe;
3259 	u64 oe_start;
3260 	u64 oe_end;
3261 
3262 	/*
3263 	 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3264 	 * means we have delalloc (dirty pages) for which writeback has not
3265 	 * started yet.
3266 	 */
3267 	if (*search_io_tree) {
3268 		spin_lock(&inode->lock);
3269 		if (inode->delalloc_bytes > 0) {
3270 			spin_unlock(&inode->lock);
3271 			*delalloc_start_ret = start;
3272 			delalloc_len = count_range_bits(&inode->io_tree,
3273 							delalloc_start_ret, end,
3274 							len, EXTENT_DELALLOC, 1,
3275 							cached_state);
3276 		} else {
3277 			spin_unlock(&inode->lock);
3278 		}
3279 	}
3280 
3281 	if (delalloc_len > 0) {
3282 		/*
3283 		 * If delalloc was found then *delalloc_start_ret has a sector size
3284 		 * aligned value (rounded down).
3285 		 */
3286 		*delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3287 
3288 		if (*delalloc_start_ret == start) {
3289 			/* Delalloc for the whole range, nothing more to do. */
3290 			if (*delalloc_end_ret == end)
3291 				return true;
3292 			/* Else trim our search range for ordered extents. */
3293 			start = *delalloc_end_ret + 1;
3294 			len = end + 1 - start;
3295 		}
3296 	} else {
3297 		/* No delalloc, future calls don't need to search again. */
3298 		*search_io_tree = false;
3299 	}
3300 
3301 	/*
3302 	 * Now also check if there's any ordered extent in the range.
3303 	 * We do this because:
3304 	 *
3305 	 * 1) When delalloc is flushed, the file range is locked, we clear the
3306 	 *    EXTENT_DELALLOC bit from the io tree and create an extent map and
3307 	 *    an ordered extent for the write. So we might just have been called
3308 	 *    after delalloc is flushed and before the ordered extent completes
3309 	 *    and inserts the new file extent item in the subvolume's btree;
3310 	 *
3311 	 * 2) We may have an ordered extent created by flushing delalloc for a
3312 	 *    subrange that starts before the subrange we found marked with
3313 	 *    EXTENT_DELALLOC in the io tree.
3314 	 *
3315 	 * We could also use the extent map tree to find such delalloc that is
3316 	 * being flushed, but using the ordered extents tree is more efficient
3317 	 * because it's usually much smaller as ordered extents are removed from
3318 	 * the tree once they complete. With the extent maps, we mau have them
3319 	 * in the extent map tree for a very long time, and they were either
3320 	 * created by previous writes or loaded by read operations.
3321 	 */
3322 	oe = btrfs_lookup_first_ordered_range(inode, start, len);
3323 	if (!oe)
3324 		return (delalloc_len > 0);
3325 
3326 	/* The ordered extent may span beyond our search range. */
3327 	oe_start = max(oe->file_offset, start);
3328 	oe_end = min(oe->file_offset + oe->num_bytes - 1, end);
3329 
3330 	btrfs_put_ordered_extent(oe);
3331 
3332 	/* Don't have unflushed delalloc, return the ordered extent range. */
3333 	if (delalloc_len == 0) {
3334 		*delalloc_start_ret = oe_start;
3335 		*delalloc_end_ret = oe_end;
3336 		return true;
3337 	}
3338 
3339 	/*
3340 	 * We have both unflushed delalloc (io_tree) and an ordered extent.
3341 	 * If the ranges are adjacent returned a combined range, otherwise
3342 	 * return the leftmost range.
3343 	 */
3344 	if (oe_start < *delalloc_start_ret) {
3345 		if (oe_end < *delalloc_start_ret)
3346 			*delalloc_end_ret = oe_end;
3347 		*delalloc_start_ret = oe_start;
3348 	} else if (*delalloc_end_ret + 1 == oe_start) {
3349 		*delalloc_end_ret = oe_end;
3350 	}
3351 
3352 	return true;
3353 }
3354 
3355 /*
3356  * Check if there's delalloc in a given range.
3357  *
3358  * @inode:               The inode.
3359  * @start:               The start offset of the range. It does not need to be
3360  *                       sector size aligned.
3361  * @end:                 The end offset (inclusive value) of the search range.
3362  *                       It does not need to be sector size aligned.
3363  * @cached_state:        Extent state record used for speeding up delalloc
3364  *                       searches in the inode's io_tree. Can be NULL.
3365  * @delalloc_start_ret:  Output argument, set to the start offset of the
3366  *                       subrange found with delalloc (may not be sector size
3367  *                       aligned).
3368  * @delalloc_end_ret:    Output argument, set to he end offset (inclusive value)
3369  *                       of the subrange found with delalloc.
3370  *
3371  * Returns true if a subrange with delalloc is found within the given range, and
3372  * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3373  * end offsets of the subrange.
3374  */
3375 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3376 				  struct extent_state **cached_state,
3377 				  u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3378 {
3379 	u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3380 	u64 prev_delalloc_end = 0;
3381 	bool search_io_tree = true;
3382 	bool ret = false;
3383 
3384 	while (cur_offset <= end) {
3385 		u64 delalloc_start;
3386 		u64 delalloc_end;
3387 		bool delalloc;
3388 
3389 		delalloc = find_delalloc_subrange(inode, cur_offset, end,
3390 						  cached_state, &search_io_tree,
3391 						  &delalloc_start,
3392 						  &delalloc_end);
3393 		if (!delalloc)
3394 			break;
3395 
3396 		if (prev_delalloc_end == 0) {
3397 			/* First subrange found. */
3398 			*delalloc_start_ret = max(delalloc_start, start);
3399 			*delalloc_end_ret = delalloc_end;
3400 			ret = true;
3401 		} else if (delalloc_start == prev_delalloc_end + 1) {
3402 			/* Subrange adjacent to the previous one, merge them. */
3403 			*delalloc_end_ret = delalloc_end;
3404 		} else {
3405 			/* Subrange not adjacent to the previous one, exit. */
3406 			break;
3407 		}
3408 
3409 		prev_delalloc_end = delalloc_end;
3410 		cur_offset = delalloc_end + 1;
3411 		cond_resched();
3412 	}
3413 
3414 	return ret;
3415 }
3416 
3417 /*
3418  * Check if there's a hole or delalloc range in a range representing a hole (or
3419  * prealloc extent) found in the inode's subvolume btree.
3420  *
3421  * @inode:      The inode.
3422  * @whence:     Seek mode (SEEK_DATA or SEEK_HOLE).
3423  * @start:      Start offset of the hole region. It does not need to be sector
3424  *              size aligned.
3425  * @end:        End offset (inclusive value) of the hole region. It does not
3426  *              need to be sector size aligned.
3427  * @start_ret:  Return parameter, used to set the start of the subrange in the
3428  *              hole that matches the search criteria (seek mode), if such
3429  *              subrange is found (return value of the function is true).
3430  *              The value returned here may not be sector size aligned.
3431  *
3432  * Returns true if a subrange matching the given seek mode is found, and if one
3433  * is found, it updates @start_ret with the start of the subrange.
3434  */
3435 static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3436 					struct extent_state **cached_state,
3437 					u64 start, u64 end, u64 *start_ret)
3438 {
3439 	u64 delalloc_start;
3440 	u64 delalloc_end;
3441 	bool delalloc;
3442 
3443 	delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state,
3444 						&delalloc_start, &delalloc_end);
3445 	if (delalloc && whence == SEEK_DATA) {
3446 		*start_ret = delalloc_start;
3447 		return true;
3448 	}
3449 
3450 	if (delalloc && whence == SEEK_HOLE) {
3451 		/*
3452 		 * We found delalloc but it starts after out start offset. So we
3453 		 * have a hole between our start offset and the delalloc start.
3454 		 */
3455 		if (start < delalloc_start) {
3456 			*start_ret = start;
3457 			return true;
3458 		}
3459 		/*
3460 		 * Delalloc range starts at our start offset.
3461 		 * If the delalloc range's length is smaller than our range,
3462 		 * then it means we have a hole that starts where the delalloc
3463 		 * subrange ends.
3464 		 */
3465 		if (delalloc_end < end) {
3466 			*start_ret = delalloc_end + 1;
3467 			return true;
3468 		}
3469 
3470 		/* There's delalloc for the whole range. */
3471 		return false;
3472 	}
3473 
3474 	if (!delalloc && whence == SEEK_HOLE) {
3475 		*start_ret = start;
3476 		return true;
3477 	}
3478 
3479 	/*
3480 	 * No delalloc in the range and we are seeking for data. The caller has
3481 	 * to iterate to the next extent item in the subvolume btree.
3482 	 */
3483 	return false;
3484 }
3485 
3486 static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
3487 {
3488 	struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
3489 	struct btrfs_file_private *private = file->private_data;
3490 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3491 	struct extent_state *cached_state = NULL;
3492 	struct extent_state **delalloc_cached_state;
3493 	const loff_t i_size = i_size_read(&inode->vfs_inode);
3494 	const u64 ino = btrfs_ino(inode);
3495 	struct btrfs_root *root = inode->root;
3496 	struct btrfs_path *path;
3497 	struct btrfs_key key;
3498 	u64 last_extent_end;
3499 	u64 lockstart;
3500 	u64 lockend;
3501 	u64 start;
3502 	int ret;
3503 	bool found = false;
3504 
3505 	if (i_size == 0 || offset >= i_size)
3506 		return -ENXIO;
3507 
3508 	/*
3509 	 * Quick path. If the inode has no prealloc extents and its number of
3510 	 * bytes used matches its i_size, then it can not have holes.
3511 	 */
3512 	if (whence == SEEK_HOLE &&
3513 	    !(inode->flags & BTRFS_INODE_PREALLOC) &&
3514 	    inode_get_bytes(&inode->vfs_inode) == i_size)
3515 		return i_size;
3516 
3517 	if (!private) {
3518 		private = kzalloc(sizeof(*private), GFP_KERNEL);
3519 		/*
3520 		 * No worries if memory allocation failed.
3521 		 * The private structure is used only for speeding up multiple
3522 		 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
3523 		 * so everything will still be correct.
3524 		 */
3525 		file->private_data = private;
3526 	}
3527 
3528 	if (private)
3529 		delalloc_cached_state = &private->llseek_cached_state;
3530 	else
3531 		delalloc_cached_state = NULL;
3532 
3533 	/*
3534 	 * offset can be negative, in this case we start finding DATA/HOLE from
3535 	 * the very start of the file.
3536 	 */
3537 	start = max_t(loff_t, 0, offset);
3538 
3539 	lockstart = round_down(start, fs_info->sectorsize);
3540 	lockend = round_up(i_size, fs_info->sectorsize);
3541 	if (lockend <= lockstart)
3542 		lockend = lockstart + fs_info->sectorsize;
3543 	lockend--;
3544 
3545 	path = btrfs_alloc_path();
3546 	if (!path)
3547 		return -ENOMEM;
3548 	path->reada = READA_FORWARD;
3549 
3550 	key.objectid = ino;
3551 	key.type = BTRFS_EXTENT_DATA_KEY;
3552 	key.offset = start;
3553 
3554 	last_extent_end = lockstart;
3555 
3556 	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3557 
3558 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3559 	if (ret < 0) {
3560 		goto out;
3561 	} else if (ret > 0 && path->slots[0] > 0) {
3562 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3563 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3564 			path->slots[0]--;
3565 	}
3566 
3567 	while (start < i_size) {
3568 		struct extent_buffer *leaf = path->nodes[0];
3569 		struct btrfs_file_extent_item *extent;
3570 		u64 extent_end;
3571 		u8 type;
3572 
3573 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3574 			ret = btrfs_next_leaf(root, path);
3575 			if (ret < 0)
3576 				goto out;
3577 			else if (ret > 0)
3578 				break;
3579 
3580 			leaf = path->nodes[0];
3581 		}
3582 
3583 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3584 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3585 			break;
3586 
3587 		extent_end = btrfs_file_extent_end(path);
3588 
3589 		/*
3590 		 * In the first iteration we may have a slot that points to an
3591 		 * extent that ends before our start offset, so skip it.
3592 		 */
3593 		if (extent_end <= start) {
3594 			path->slots[0]++;
3595 			continue;
3596 		}
3597 
3598 		/* We have an implicit hole, NO_HOLES feature is likely set. */
3599 		if (last_extent_end < key.offset) {
3600 			u64 search_start = last_extent_end;
3601 			u64 found_start;
3602 
3603 			/*
3604 			 * First iteration, @start matches @offset and it's
3605 			 * within the hole.
3606 			 */
3607 			if (start == offset)
3608 				search_start = offset;
3609 
3610 			found = find_desired_extent_in_hole(inode, whence,
3611 							    delalloc_cached_state,
3612 							    search_start,
3613 							    key.offset - 1,
3614 							    &found_start);
3615 			if (found) {
3616 				start = found_start;
3617 				break;
3618 			}
3619 			/*
3620 			 * Didn't find data or a hole (due to delalloc) in the
3621 			 * implicit hole range, so need to analyze the extent.
3622 			 */
3623 		}
3624 
3625 		extent = btrfs_item_ptr(leaf, path->slots[0],
3626 					struct btrfs_file_extent_item);
3627 		type = btrfs_file_extent_type(leaf, extent);
3628 
3629 		/*
3630 		 * Can't access the extent's disk_bytenr field if this is an
3631 		 * inline extent, since at that offset, it's where the extent
3632 		 * data starts.
3633 		 */
3634 		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
3635 		    (type == BTRFS_FILE_EXTENT_REG &&
3636 		     btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
3637 			/*
3638 			 * Explicit hole or prealloc extent, search for delalloc.
3639 			 * A prealloc extent is treated like a hole.
3640 			 */
3641 			u64 search_start = key.offset;
3642 			u64 found_start;
3643 
3644 			/*
3645 			 * First iteration, @start matches @offset and it's
3646 			 * within the hole.
3647 			 */
3648 			if (start == offset)
3649 				search_start = offset;
3650 
3651 			found = find_desired_extent_in_hole(inode, whence,
3652 							    delalloc_cached_state,
3653 							    search_start,
3654 							    extent_end - 1,
3655 							    &found_start);
3656 			if (found) {
3657 				start = found_start;
3658 				break;
3659 			}
3660 			/*
3661 			 * Didn't find data or a hole (due to delalloc) in the
3662 			 * implicit hole range, so need to analyze the next
3663 			 * extent item.
3664 			 */
3665 		} else {
3666 			/*
3667 			 * Found a regular or inline extent.
3668 			 * If we are seeking for data, adjust the start offset
3669 			 * and stop, we're done.
3670 			 */
3671 			if (whence == SEEK_DATA) {
3672 				start = max_t(u64, key.offset, offset);
3673 				found = true;
3674 				break;
3675 			}
3676 			/*
3677 			 * Else, we are seeking for a hole, check the next file
3678 			 * extent item.
3679 			 */
3680 		}
3681 
3682 		start = extent_end;
3683 		last_extent_end = extent_end;
3684 		path->slots[0]++;
3685 		if (fatal_signal_pending(current)) {
3686 			ret = -EINTR;
3687 			goto out;
3688 		}
3689 		cond_resched();
3690 	}
3691 
3692 	/* We have an implicit hole from the last extent found up to i_size. */
3693 	if (!found && start < i_size) {
3694 		found = find_desired_extent_in_hole(inode, whence,
3695 						    delalloc_cached_state, start,
3696 						    i_size - 1, &start);
3697 		if (!found)
3698 			start = i_size;
3699 	}
3700 
3701 out:
3702 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3703 	btrfs_free_path(path);
3704 
3705 	if (ret < 0)
3706 		return ret;
3707 
3708 	if (whence == SEEK_DATA && start >= i_size)
3709 		return -ENXIO;
3710 
3711 	return min_t(loff_t, start, i_size);
3712 }
3713 
3714 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3715 {
3716 	struct inode *inode = file->f_mapping->host;
3717 
3718 	switch (whence) {
3719 	default:
3720 		return generic_file_llseek(file, offset, whence);
3721 	case SEEK_DATA:
3722 	case SEEK_HOLE:
3723 		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3724 		offset = find_desired_extent(file, offset, whence);
3725 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3726 		break;
3727 	}
3728 
3729 	if (offset < 0)
3730 		return offset;
3731 
3732 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3733 }
3734 
3735 static int btrfs_file_open(struct inode *inode, struct file *filp)
3736 {
3737 	int ret;
3738 
3739 	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC |
3740 		        FMODE_CAN_ODIRECT;
3741 
3742 	ret = fsverity_file_open(inode, filp);
3743 	if (ret)
3744 		return ret;
3745 	return generic_file_open(inode, filp);
3746 }
3747 
3748 static int check_direct_read(struct btrfs_fs_info *fs_info,
3749 			     const struct iov_iter *iter, loff_t offset)
3750 {
3751 	int ret;
3752 	int i, seg;
3753 
3754 	ret = check_direct_IO(fs_info, iter, offset);
3755 	if (ret < 0)
3756 		return ret;
3757 
3758 	if (!iter_is_iovec(iter))
3759 		return 0;
3760 
3761 	for (seg = 0; seg < iter->nr_segs; seg++) {
3762 		for (i = seg + 1; i < iter->nr_segs; i++) {
3763 			const struct iovec *iov1 = iter_iov(iter) + seg;
3764 			const struct iovec *iov2 = iter_iov(iter) + i;
3765 
3766 			if (iov1->iov_base == iov2->iov_base)
3767 				return -EINVAL;
3768 		}
3769 	}
3770 	return 0;
3771 }
3772 
3773 static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
3774 {
3775 	struct inode *inode = file_inode(iocb->ki_filp);
3776 	size_t prev_left = 0;
3777 	ssize_t read = 0;
3778 	ssize_t ret;
3779 
3780 	if (fsverity_active(inode))
3781 		return 0;
3782 
3783 	if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
3784 		return 0;
3785 
3786 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3787 again:
3788 	/*
3789 	 * This is similar to what we do for direct IO writes, see the comment
3790 	 * at btrfs_direct_write(), but we also disable page faults in addition
3791 	 * to disabling them only at the iov_iter level. This is because when
3792 	 * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
3793 	 * which can still trigger page fault ins despite having set ->nofault
3794 	 * to true of our 'to' iov_iter.
3795 	 *
3796 	 * The difference to direct IO writes is that we deadlock when trying
3797 	 * to lock the extent range in the inode's tree during he page reads
3798 	 * triggered by the fault in (while for writes it is due to waiting for
3799 	 * our own ordered extent). This is because for direct IO reads,
3800 	 * btrfs_dio_iomap_begin() returns with the extent range locked, which
3801 	 * is only unlocked in the endio callback (end_bio_extent_readpage()).
3802 	 */
3803 	pagefault_disable();
3804 	to->nofault = true;
3805 	ret = btrfs_dio_read(iocb, to, read);
3806 	to->nofault = false;
3807 	pagefault_enable();
3808 
3809 	/* No increment (+=) because iomap returns a cumulative value. */
3810 	if (ret > 0)
3811 		read = ret;
3812 
3813 	if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
3814 		const size_t left = iov_iter_count(to);
3815 
3816 		if (left == prev_left) {
3817 			/*
3818 			 * We didn't make any progress since the last attempt,
3819 			 * fallback to a buffered read for the remainder of the
3820 			 * range. This is just to avoid any possibility of looping
3821 			 * for too long.
3822 			 */
3823 			ret = read;
3824 		} else {
3825 			/*
3826 			 * We made some progress since the last retry or this is
3827 			 * the first time we are retrying. Fault in as many pages
3828 			 * as possible and retry.
3829 			 */
3830 			fault_in_iov_iter_writeable(to, left);
3831 			prev_left = left;
3832 			goto again;
3833 		}
3834 	}
3835 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3836 	return ret < 0 ? ret : read;
3837 }
3838 
3839 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3840 {
3841 	ssize_t ret = 0;
3842 
3843 	if (iocb->ki_flags & IOCB_DIRECT) {
3844 		ret = btrfs_direct_read(iocb, to);
3845 		if (ret < 0 || !iov_iter_count(to) ||
3846 		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3847 			return ret;
3848 	}
3849 
3850 	return filemap_read(iocb, to, ret);
3851 }
3852 
3853 const struct file_operations btrfs_file_operations = {
3854 	.llseek		= btrfs_file_llseek,
3855 	.read_iter      = btrfs_file_read_iter,
3856 	.splice_read	= filemap_splice_read,
3857 	.write_iter	= btrfs_file_write_iter,
3858 	.splice_write	= iter_file_splice_write,
3859 	.mmap		= btrfs_file_mmap,
3860 	.open		= btrfs_file_open,
3861 	.release	= btrfs_release_file,
3862 	.get_unmapped_area = thp_get_unmapped_area,
3863 	.fsync		= btrfs_sync_file,
3864 	.fallocate	= btrfs_fallocate,
3865 	.unlocked_ioctl	= btrfs_ioctl,
3866 #ifdef CONFIG_COMPAT
3867 	.compat_ioctl	= btrfs_compat_ioctl,
3868 #endif
3869 	.remap_file_range = btrfs_remap_file_range,
3870 };
3871 
3872 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3873 {
3874 	int ret;
3875 
3876 	/*
3877 	 * So with compression we will find and lock a dirty page and clear the
3878 	 * first one as dirty, setup an async extent, and immediately return
3879 	 * with the entire range locked but with nobody actually marked with
3880 	 * writeback.  So we can't just filemap_write_and_wait_range() and
3881 	 * expect it to work since it will just kick off a thread to do the
3882 	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3883 	 * since it will wait on the page lock, which won't be unlocked until
3884 	 * after the pages have been marked as writeback and so we're good to go
3885 	 * from there.  We have to do this otherwise we'll miss the ordered
3886 	 * extents and that results in badness.  Please Josef, do not think you
3887 	 * know better and pull this out at some point in the future, it is
3888 	 * right and you are wrong.
3889 	 */
3890 	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3891 	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3892 			     &BTRFS_I(inode)->runtime_flags))
3893 		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3894 
3895 	return ret;
3896 }
3897