xref: /openbmc/linux/fs/btrfs/file.c (revision fd5e9fccbd504c5179ab57ff695c610bca8809d6)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5 
6 #include <linux/fs.h>
7 #include <linux/pagemap.h>
8 #include <linux/time.h>
9 #include <linux/init.h>
10 #include <linux/string.h>
11 #include <linux/backing-dev.h>
12 #include <linux/falloc.h>
13 #include <linux/writeback.h>
14 #include <linux/compat.h>
15 #include <linux/slab.h>
16 #include <linux/btrfs.h>
17 #include <linux/uio.h>
18 #include <linux/iversion.h>
19 #include <linux/fsverity.h>
20 #include "ctree.h"
21 #include "disk-io.h"
22 #include "transaction.h"
23 #include "btrfs_inode.h"
24 #include "print-tree.h"
25 #include "tree-log.h"
26 #include "locking.h"
27 #include "volumes.h"
28 #include "qgroup.h"
29 #include "compression.h"
30 #include "delalloc-space.h"
31 #include "reflink.h"
32 #include "subpage.h"
33 #include "fs.h"
34 #include "accessors.h"
35 #include "extent-tree.h"
36 #include "file-item.h"
37 #include "ioctl.h"
38 #include "file.h"
39 #include "super.h"
40 
41 /* Simple helper to fault in pages and copy.  This should go away
42  * and be replaced with calls into generic code.
43  */
44 static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes,
45 					 struct page **prepared_pages,
46 					 struct iov_iter *i)
47 {
48 	size_t copied = 0;
49 	size_t total_copied = 0;
50 	int pg = 0;
51 	int offset = offset_in_page(pos);
52 
53 	while (write_bytes > 0) {
54 		size_t count = min_t(size_t,
55 				     PAGE_SIZE - offset, write_bytes);
56 		struct page *page = prepared_pages[pg];
57 		/*
58 		 * Copy data from userspace to the current page
59 		 */
60 		copied = copy_page_from_iter_atomic(page, offset, count, i);
61 
62 		/* Flush processor's dcache for this page */
63 		flush_dcache_page(page);
64 
65 		/*
66 		 * if we get a partial write, we can end up with
67 		 * partially up to date pages.  These add
68 		 * a lot of complexity, so make sure they don't
69 		 * happen by forcing this copy to be retried.
70 		 *
71 		 * The rest of the btrfs_file_write code will fall
72 		 * back to page at a time copies after we return 0.
73 		 */
74 		if (unlikely(copied < count)) {
75 			if (!PageUptodate(page)) {
76 				iov_iter_revert(i, copied);
77 				copied = 0;
78 			}
79 			if (!copied)
80 				break;
81 		}
82 
83 		write_bytes -= copied;
84 		total_copied += copied;
85 		offset += copied;
86 		if (offset == PAGE_SIZE) {
87 			pg++;
88 			offset = 0;
89 		}
90 	}
91 	return total_copied;
92 }
93 
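/*
 * Editor's note: a minimal sketch (not upstream code) of the caller-side
 * contract for btrfs_copy_from_user() above - after a short copy the
 * buffered write loop below shrinks its batch to a single page and retries:
 *
 *	copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
 *	if (copied < write_bytes)
 *		nrptrs = 1;	(one page at a time from now on)
 */
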
94 /*
95  * unlocks pages after btrfs_file_write is done with them
96  */
97 static void btrfs_drop_pages(struct btrfs_fs_info *fs_info,
98 			     struct page **pages, size_t num_pages,
99 			     u64 pos, u64 copied)
100 {
101 	size_t i;
102 	u64 block_start = round_down(pos, fs_info->sectorsize);
103 	u64 block_len = round_up(pos + copied, fs_info->sectorsize) - block_start;
104 
105 	ASSERT(block_len <= U32_MAX);
106 	for (i = 0; i < num_pages; i++) {
107 		/* The page "checked" bit is some magic around finding pages
108 		 * that have been modified without going through
109 		 * btrfs_set_page_dirty; clear it here. There should be no need
110 		 * to mark the pages accessed, as prepare_pages should have
111 		 * already done that via find_or_create_page().
112 		 */
113 		btrfs_page_clamp_clear_checked(fs_info, pages[i], block_start,
114 					       block_len);
115 		unlock_page(pages[i]);
116 		put_page(pages[i]);
117 	}
118 }
119 
120 /*
121  * After btrfs_copy_from_user(), update the following things for delalloc:
122  * - Mark newly dirtied pages as DELALLOC in the io tree.
123  *   Used to advise which range is to be written back.
124  * - Mark modified pages as Uptodate/Dirty and not needing COW fixup
125  * - Update inode size for past EOF write
126  */
127 int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
128 		      size_t num_pages, loff_t pos, size_t write_bytes,
129 		      struct extent_state **cached, bool noreserve)
130 {
131 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
132 	int err = 0;
133 	int i;
134 	u64 num_bytes;
135 	u64 start_pos;
136 	u64 end_of_last_block;
137 	u64 end_pos = pos + write_bytes;
138 	loff_t isize = i_size_read(&inode->vfs_inode);
139 	unsigned int extra_bits = 0;
140 
141 	if (write_bytes == 0)
142 		return 0;
143 
144 	if (noreserve)
145 		extra_bits |= EXTENT_NORESERVE;
146 
147 	start_pos = round_down(pos, fs_info->sectorsize);
148 	num_bytes = round_up(write_bytes + pos - start_pos,
149 			     fs_info->sectorsize);
150 	ASSERT(num_bytes <= U32_MAX);
151 
152 	end_of_last_block = start_pos + num_bytes - 1;
153 
154 	/*
155 	 * The pages may have already been dirty, clear out old accounting so
156 	 * we can set things up properly
157 	 */
158 	clear_extent_bit(&inode->io_tree, start_pos, end_of_last_block,
159 			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
160 			 cached);
161 
162 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
163 					extra_bits, cached);
164 	if (err)
165 		return err;
166 
167 	for (i = 0; i < num_pages; i++) {
168 		struct page *p = pages[i];
169 
170 		btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
171 		btrfs_page_clamp_clear_checked(fs_info, p, start_pos, num_bytes);
172 		btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
173 	}
174 
175 	/*
176 	 * We've only changed i_size in RAM; we haven't updated the
177 	 * disk i_size. There is no need to log the inode
178 	 * at this time.
179 	 */
180 	if (end_pos > isize)
181 		i_size_write(&inode->vfs_inode, end_pos);
182 	return 0;
183 }
184 
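/*
 * Editor's note: a worked example (hypothetical values) of the rounding in
 * btrfs_dirty_pages() above, assuming a 4K sectorsize, pos = 5000 and
 * write_bytes = 2000:
 *
 *	start_pos = round_down(5000, 4096) = 4096
 *	num_bytes = round_up(2000 + 5000 - 4096, 4096) = 4096
 *
 * so exactly one sector is marked delalloc, uptodate and dirty.
 */
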
185 /*
186  * This is very complex, but the basic idea is to drop all extents
187  * in the range from args->start to args->end. The remaining input and
188  * output parameters are carried in the btrfs_drop_extents_args structure.
189  *
190  * If an extent intersects the range but is not entirely inside the range
191  * it is either truncated or split.  Anything entirely inside the range
192  * is deleted from the tree.
193  *
194  * Note: the VFS' inode number of bytes is not updated, it's up to the caller
195  * to deal with that. We set the field 'bytes_found' of the arguments structure
196  * with the number of allocated bytes found in the target range, so that the
197  * caller can update the inode's number of bytes in an atomic way when
198  * replacing extents in a range to avoid races with stat(2).
199  */
200 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
201 		       struct btrfs_root *root, struct btrfs_inode *inode,
202 		       struct btrfs_drop_extents_args *args)
203 {
204 	struct btrfs_fs_info *fs_info = root->fs_info;
205 	struct extent_buffer *leaf;
206 	struct btrfs_file_extent_item *fi;
207 	struct btrfs_ref ref = { 0 };
208 	struct btrfs_key key;
209 	struct btrfs_key new_key;
210 	u64 ino = btrfs_ino(inode);
211 	u64 search_start = args->start;
212 	u64 disk_bytenr = 0;
213 	u64 num_bytes = 0;
214 	u64 extent_offset = 0;
215 	u64 extent_end = 0;
216 	u64 last_end = args->start;
217 	int del_nr = 0;
218 	int del_slot = 0;
219 	int extent_type;
220 	int recow;
221 	int ret;
222 	int modify_tree = -1;
223 	int update_refs;
224 	int found = 0;
225 	struct btrfs_path *path = args->path;
226 
227 	args->bytes_found = 0;
228 	args->extent_inserted = false;
229 
230 	/* Must always have a path if ->replace_extent is true */
231 	ASSERT(!(args->replace_extent && !args->path));
232 
233 	if (!path) {
234 		path = btrfs_alloc_path();
235 		if (!path) {
236 			ret = -ENOMEM;
237 			goto out;
238 		}
239 	}
240 
241 	if (args->drop_cache)
242 		btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false);
243 
244 	if (data_race(args->start >= inode->disk_i_size) && !args->replace_extent)
245 		modify_tree = 0;
246 
247 	update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID);
248 	while (1) {
249 		recow = 0;
250 		ret = btrfs_lookup_file_extent(trans, root, path, ino,
251 					       search_start, modify_tree);
252 		if (ret < 0)
253 			break;
254 		if (ret > 0 && path->slots[0] > 0 && search_start == args->start) {
255 			leaf = path->nodes[0];
256 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
257 			if (key.objectid == ino &&
258 			    key.type == BTRFS_EXTENT_DATA_KEY)
259 				path->slots[0]--;
260 		}
261 		ret = 0;
262 next_slot:
263 		leaf = path->nodes[0];
264 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
265 			BUG_ON(del_nr > 0);
266 			ret = btrfs_next_leaf(root, path);
267 			if (ret < 0)
268 				break;
269 			if (ret > 0) {
270 				ret = 0;
271 				break;
272 			}
273 			leaf = path->nodes[0];
274 			recow = 1;
275 		}
276 
277 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
278 
279 		if (key.objectid > ino)
280 			break;
281 		if (WARN_ON_ONCE(key.objectid < ino) ||
282 		    key.type < BTRFS_EXTENT_DATA_KEY) {
283 			ASSERT(del_nr == 0);
284 			path->slots[0]++;
285 			goto next_slot;
286 		}
287 		if (key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= args->end)
288 			break;
289 
290 		fi = btrfs_item_ptr(leaf, path->slots[0],
291 				    struct btrfs_file_extent_item);
292 		extent_type = btrfs_file_extent_type(leaf, fi);
293 
294 		if (extent_type == BTRFS_FILE_EXTENT_REG ||
295 		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
296 			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
297 			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
298 			extent_offset = btrfs_file_extent_offset(leaf, fi);
299 			extent_end = key.offset +
300 				btrfs_file_extent_num_bytes(leaf, fi);
301 		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
302 			extent_end = key.offset +
303 				btrfs_file_extent_ram_bytes(leaf, fi);
304 		} else {
305 			/* can't happen */
306 			BUG();
307 		}
308 
309 		/*
310 		 * Don't skip extent items representing 0 byte lengths. They
311 		 * used to be created (a bug) when we hit an -ENOSPC condition
312 		 * while punching holes. So if we find one here, make sure we
313 		 * delete it, otherwise we would insert a new file extent item
314 		 * with the same key (offset) as that 0 byte length file
315 		 * extent item in the call to setup_items_for_insert() later
316 		 * in this function.
317 		 */
318 		if (extent_end == key.offset && extent_end >= search_start) {
319 			last_end = extent_end;
320 			goto delete_extent_item;
321 		}
322 
323 		if (extent_end <= search_start) {
324 			path->slots[0]++;
325 			goto next_slot;
326 		}
327 
328 		found = 1;
329 		search_start = max(key.offset, args->start);
330 		if (recow || !modify_tree) {
331 			modify_tree = -1;
332 			btrfs_release_path(path);
333 			continue;
334 		}
335 
336 		/*
337 		 *     | - range to drop - |
338 		 *  | -------- extent -------- |
339 		 */
340 		if (args->start > key.offset && args->end < extent_end) {
341 			BUG_ON(del_nr > 0);
342 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
343 				ret = -EOPNOTSUPP;
344 				break;
345 			}
346 
347 			memcpy(&new_key, &key, sizeof(new_key));
348 			new_key.offset = args->start;
349 			ret = btrfs_duplicate_item(trans, root, path,
350 						   &new_key);
351 			if (ret == -EAGAIN) {
352 				btrfs_release_path(path);
353 				continue;
354 			}
355 			if (ret < 0)
356 				break;
357 
358 			leaf = path->nodes[0];
359 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
360 					    struct btrfs_file_extent_item);
361 			btrfs_set_file_extent_num_bytes(leaf, fi,
362 							args->start - key.offset);
363 
364 			fi = btrfs_item_ptr(leaf, path->slots[0],
365 					    struct btrfs_file_extent_item);
366 
367 			extent_offset += args->start - key.offset;
368 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
369 			btrfs_set_file_extent_num_bytes(leaf, fi,
370 							extent_end - args->start);
371 			btrfs_mark_buffer_dirty(trans, leaf);
372 
373 			if (update_refs && disk_bytenr > 0) {
374 				btrfs_init_generic_ref(&ref,
375 						BTRFS_ADD_DELAYED_REF,
376 						disk_bytenr, num_bytes, 0);
377 				btrfs_init_data_ref(&ref,
378 						root->root_key.objectid,
379 						new_key.objectid,
380 						args->start - extent_offset,
381 						0, false);
382 				ret = btrfs_inc_extent_ref(trans, &ref);
383 				if (ret) {
384 					btrfs_abort_transaction(trans, ret);
385 					break;
386 				}
387 			}
388 			key.offset = args->start;
389 		}
390 		/*
391 		 * From here on out we will have actually dropped something, so
392 		 * last_end can be updated.
393 		 */
394 		last_end = extent_end;
395 
396 		/*
397 		 *  | ---- range to drop ----- |
398 		 *      | -------- extent -------- |
399 		 */
400 		if (args->start <= key.offset && args->end < extent_end) {
401 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
402 				ret = -EOPNOTSUPP;
403 				break;
404 			}
405 
406 			memcpy(&new_key, &key, sizeof(new_key));
407 			new_key.offset = args->end;
408 			btrfs_set_item_key_safe(trans, path, &new_key);
409 
410 			extent_offset += args->end - key.offset;
411 			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
412 			btrfs_set_file_extent_num_bytes(leaf, fi,
413 							extent_end - args->end);
414 			btrfs_mark_buffer_dirty(trans, leaf);
415 			if (update_refs && disk_bytenr > 0)
416 				args->bytes_found += args->end - key.offset;
417 			break;
418 		}
419 
420 		search_start = extent_end;
421 		/*
422 		 *       | ---- range to drop ----- |
423 		 *  | -------- extent -------- |
424 		 */
425 		if (args->start > key.offset && args->end >= extent_end) {
426 			BUG_ON(del_nr > 0);
427 			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
428 				ret = -EOPNOTSUPP;
429 				break;
430 			}
431 
432 			btrfs_set_file_extent_num_bytes(leaf, fi,
433 							args->start - key.offset);
434 			btrfs_mark_buffer_dirty(trans, leaf);
435 			if (update_refs && disk_bytenr > 0)
436 				args->bytes_found += extent_end - args->start;
437 			if (args->end == extent_end)
438 				break;
439 
440 			path->slots[0]++;
441 			goto next_slot;
442 		}
443 
444 		/*
445 		 *  | ---- range to drop ----- |
446 		 *    | ------ extent ------ |
447 		 */
448 		if (args->start <= key.offset && args->end >= extent_end) {
449 delete_extent_item:
450 			if (del_nr == 0) {
451 				del_slot = path->slots[0];
452 				del_nr = 1;
453 			} else {
454 				BUG_ON(del_slot + del_nr != path->slots[0]);
455 				del_nr++;
456 			}
457 
458 			if (update_refs &&
459 			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
460 				args->bytes_found += extent_end - key.offset;
461 				extent_end = ALIGN(extent_end,
462 						   fs_info->sectorsize);
463 			} else if (update_refs && disk_bytenr > 0) {
464 				btrfs_init_generic_ref(&ref,
465 						BTRFS_DROP_DELAYED_REF,
466 						disk_bytenr, num_bytes, 0);
467 				btrfs_init_data_ref(&ref,
468 						root->root_key.objectid,
469 						key.objectid,
470 						key.offset - extent_offset, 0,
471 						false);
472 				ret = btrfs_free_extent(trans, &ref);
473 				if (ret) {
474 					btrfs_abort_transaction(trans, ret);
475 					break;
476 				}
477 				args->bytes_found += extent_end - key.offset;
478 			}
479 
480 			if (args->end == extent_end)
481 				break;
482 
483 			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
484 				path->slots[0]++;
485 				goto next_slot;
486 			}
487 
488 			ret = btrfs_del_items(trans, root, path, del_slot,
489 					      del_nr);
490 			if (ret) {
491 				btrfs_abort_transaction(trans, ret);
492 				break;
493 			}
494 
495 			del_nr = 0;
496 			del_slot = 0;
497 
498 			btrfs_release_path(path);
499 			continue;
500 		}
501 
502 		BUG();
503 	}
504 
505 	if (!ret && del_nr > 0) {
506 		/*
507 		 * Set path->slots[0] to the first slot, so that after the delete,
508 		 * if items are moved off from our leaf to its immediate left or
509 		 * right neighbor leaves, we end up with a correct and adjusted
510 		 * path->slots[0] for our insertion (if args->replace_extent).
511 		 */
512 		path->slots[0] = del_slot;
513 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
514 		if (ret)
515 			btrfs_abort_transaction(trans, ret);
516 	}
517 
518 	leaf = path->nodes[0];
519 	/*
520 	 * If btrfs_del_items() was called, it might have deleted a leaf, in
521 	 * which case it unlocked our path, so check path->locks[0] matches a
522 	 * write lock.
523 	 */
524 	if (!ret && args->replace_extent &&
525 	    path->locks[0] == BTRFS_WRITE_LOCK &&
526 	    btrfs_leaf_free_space(leaf) >=
527 	    sizeof(struct btrfs_item) + args->extent_item_size) {
528 
529 		key.objectid = ino;
530 		key.type = BTRFS_EXTENT_DATA_KEY;
531 		key.offset = args->start;
532 		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
533 			struct btrfs_key slot_key;
534 
535 			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
536 			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
537 				path->slots[0]++;
538 		}
539 		btrfs_setup_item_for_insert(trans, root, path, &key,
540 					    args->extent_item_size);
541 		args->extent_inserted = true;
542 	}
543 
544 	if (!args->path)
545 		btrfs_free_path(path);
546 	else if (!args->extent_inserted)
547 		btrfs_release_path(path);
548 out:
549 	args->drop_end = found ? min(args->end, last_end) : args->end;
550 
551 	return ret;
552 }
553 
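/*
 * Editor's note: a minimal, hypothetical sketch of how a caller might drive
 * btrfs_drop_extents() above; the transaction, root and inode setup are
 * assumed:
 *
 *	struct btrfs_drop_extents_args args = { 0 };
 *
 *	args.start = start;
 *	args.end = start + len;
 *	args.drop_cache = true;
 *	ret = btrfs_drop_extents(trans, root, inode, &args);
 *	if (ret == 0)
 *		update the inode's byte count from args.bytes_found
 */
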
554 static int extent_mergeable(struct extent_buffer *leaf, int slot,
555 			    u64 objectid, u64 bytenr, u64 orig_offset,
556 			    u64 *start, u64 *end)
557 {
558 	struct btrfs_file_extent_item *fi;
559 	struct btrfs_key key;
560 	u64 extent_end;
561 
562 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
563 		return 0;
564 
565 	btrfs_item_key_to_cpu(leaf, &key, slot);
566 	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
567 		return 0;
568 
569 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
570 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
571 	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
572 	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
573 	    btrfs_file_extent_compression(leaf, fi) ||
574 	    btrfs_file_extent_encryption(leaf, fi) ||
575 	    btrfs_file_extent_other_encoding(leaf, fi))
576 		return 0;
577 
578 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
579 	if ((*start && *start != key.offset) || (*end && *end != extent_end))
580 		return 0;
581 
582 	*start = key.offset;
583 	*end = extent_end;
584 	return 1;
585 }
586 
587 /*
588  * Mark extent in the range start - end as written.
589  *
590  * This changes extent type from 'pre-allocated' to 'regular'. If only
591  * part of extent is marked as written, the extent will be split into
592  * two or three.
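 *
 * Editor's note, an illustration with hypothetical values: marking the
 * middle of a preallocated extent as written splits it into three pieces:
 *
 *	|--------- prealloc ---------|	key.offset = 0, extent_end = 12K
 *	        | written |		start = 4K, end = 8K
 *
 * giving [0,4K) prealloc, [4K,8K) regular and [8K,12K) prealloc.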
593  */
594 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
595 			      struct btrfs_inode *inode, u64 start, u64 end)
596 {
597 	struct btrfs_root *root = inode->root;
598 	struct extent_buffer *leaf;
599 	struct btrfs_path *path;
600 	struct btrfs_file_extent_item *fi;
601 	struct btrfs_ref ref = { 0 };
602 	struct btrfs_key key;
603 	struct btrfs_key new_key;
604 	u64 bytenr;
605 	u64 num_bytes;
606 	u64 extent_end;
607 	u64 orig_offset;
608 	u64 other_start;
609 	u64 other_end;
610 	u64 split;
611 	int del_nr = 0;
612 	int del_slot = 0;
613 	int recow;
614 	int ret = 0;
615 	u64 ino = btrfs_ino(inode);
616 
617 	path = btrfs_alloc_path();
618 	if (!path)
619 		return -ENOMEM;
620 again:
621 	recow = 0;
622 	split = start;
623 	key.objectid = ino;
624 	key.type = BTRFS_EXTENT_DATA_KEY;
625 	key.offset = split;
626 
627 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
628 	if (ret < 0)
629 		goto out;
630 	if (ret > 0 && path->slots[0] > 0)
631 		path->slots[0]--;
632 
633 	leaf = path->nodes[0];
634 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
635 	if (key.objectid != ino ||
636 	    key.type != BTRFS_EXTENT_DATA_KEY) {
637 		ret = -EINVAL;
638 		btrfs_abort_transaction(trans, ret);
639 		goto out;
640 	}
641 	fi = btrfs_item_ptr(leaf, path->slots[0],
642 			    struct btrfs_file_extent_item);
643 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_PREALLOC) {
644 		ret = -EINVAL;
645 		btrfs_abort_transaction(trans, ret);
646 		goto out;
647 	}
648 	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
649 	if (key.offset > start || extent_end < end) {
650 		ret = -EINVAL;
651 		btrfs_abort_transaction(trans, ret);
652 		goto out;
653 	}
654 
655 	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
656 	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
657 	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
658 	memcpy(&new_key, &key, sizeof(new_key));
659 
660 	if (start == key.offset && end < extent_end) {
661 		other_start = 0;
662 		other_end = start;
663 		if (extent_mergeable(leaf, path->slots[0] - 1,
664 				     ino, bytenr, orig_offset,
665 				     &other_start, &other_end)) {
666 			new_key.offset = end;
667 			btrfs_set_item_key_safe(trans, path, &new_key);
668 			fi = btrfs_item_ptr(leaf, path->slots[0],
669 					    struct btrfs_file_extent_item);
670 			btrfs_set_file_extent_generation(leaf, fi,
671 							 trans->transid);
672 			btrfs_set_file_extent_num_bytes(leaf, fi,
673 							extent_end - end);
674 			btrfs_set_file_extent_offset(leaf, fi,
675 						     end - orig_offset);
676 			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
677 					    struct btrfs_file_extent_item);
678 			btrfs_set_file_extent_generation(leaf, fi,
679 							 trans->transid);
680 			btrfs_set_file_extent_num_bytes(leaf, fi,
681 							end - other_start);
682 			btrfs_mark_buffer_dirty(trans, leaf);
683 			goto out;
684 		}
685 	}
686 
687 	if (start > key.offset && end == extent_end) {
688 		other_start = end;
689 		other_end = 0;
690 		if (extent_mergeable(leaf, path->slots[0] + 1,
691 				     ino, bytenr, orig_offset,
692 				     &other_start, &other_end)) {
693 			fi = btrfs_item_ptr(leaf, path->slots[0],
694 					    struct btrfs_file_extent_item);
695 			btrfs_set_file_extent_num_bytes(leaf, fi,
696 							start - key.offset);
697 			btrfs_set_file_extent_generation(leaf, fi,
698 							 trans->transid);
699 			path->slots[0]++;
700 			new_key.offset = start;
701 			btrfs_set_item_key_safe(trans, path, &new_key);
702 
703 			fi = btrfs_item_ptr(leaf, path->slots[0],
704 					    struct btrfs_file_extent_item);
705 			btrfs_set_file_extent_generation(leaf, fi,
706 							 trans->transid);
707 			btrfs_set_file_extent_num_bytes(leaf, fi,
708 							other_end - start);
709 			btrfs_set_file_extent_offset(leaf, fi,
710 						     start - orig_offset);
711 			btrfs_mark_buffer_dirty(trans, leaf);
712 			goto out;
713 		}
714 	}
715 
716 	while (start > key.offset || end < extent_end) {
717 		if (key.offset == start)
718 			split = end;
719 
720 		new_key.offset = split;
721 		ret = btrfs_duplicate_item(trans, root, path, &new_key);
722 		if (ret == -EAGAIN) {
723 			btrfs_release_path(path);
724 			goto again;
725 		}
726 		if (ret < 0) {
727 			btrfs_abort_transaction(trans, ret);
728 			goto out;
729 		}
730 
731 		leaf = path->nodes[0];
732 		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
733 				    struct btrfs_file_extent_item);
734 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
735 		btrfs_set_file_extent_num_bytes(leaf, fi,
736 						split - key.offset);
737 
738 		fi = btrfs_item_ptr(leaf, path->slots[0],
739 				    struct btrfs_file_extent_item);
740 
741 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
742 		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
743 		btrfs_set_file_extent_num_bytes(leaf, fi,
744 						extent_end - split);
745 		btrfs_mark_buffer_dirty(trans, leaf);
746 
747 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
748 				       num_bytes, 0);
749 		btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
750 				    orig_offset, 0, false);
751 		ret = btrfs_inc_extent_ref(trans, &ref);
752 		if (ret) {
753 			btrfs_abort_transaction(trans, ret);
754 			goto out;
755 		}
756 
757 		if (split == start) {
758 			key.offset = start;
759 		} else {
760 			if (start != key.offset) {
761 				ret = -EINVAL;
762 				btrfs_abort_transaction(trans, ret);
763 				goto out;
764 			}
765 			path->slots[0]--;
766 			extent_end = end;
767 		}
768 		recow = 1;
769 	}
770 
771 	other_start = end;
772 	other_end = 0;
773 	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
774 			       num_bytes, 0);
775 	btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset,
776 			    0, false);
777 	if (extent_mergeable(leaf, path->slots[0] + 1,
778 			     ino, bytenr, orig_offset,
779 			     &other_start, &other_end)) {
780 		if (recow) {
781 			btrfs_release_path(path);
782 			goto again;
783 		}
784 		extent_end = other_end;
785 		del_slot = path->slots[0] + 1;
786 		del_nr++;
787 		ret = btrfs_free_extent(trans, &ref);
788 		if (ret) {
789 			btrfs_abort_transaction(trans, ret);
790 			goto out;
791 		}
792 	}
793 	other_start = 0;
794 	other_end = start;
795 	if (extent_mergeable(leaf, path->slots[0] - 1,
796 			     ino, bytenr, orig_offset,
797 			     &other_start, &other_end)) {
798 		if (recow) {
799 			btrfs_release_path(path);
800 			goto again;
801 		}
802 		key.offset = other_start;
803 		del_slot = path->slots[0];
804 		del_nr++;
805 		ret = btrfs_free_extent(trans, &ref);
806 		if (ret) {
807 			btrfs_abort_transaction(trans, ret);
808 			goto out;
809 		}
810 	}
811 	if (del_nr == 0) {
812 		fi = btrfs_item_ptr(leaf, path->slots[0],
813 			   struct btrfs_file_extent_item);
814 		btrfs_set_file_extent_type(leaf, fi,
815 					   BTRFS_FILE_EXTENT_REG);
816 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
817 		btrfs_mark_buffer_dirty(trans, leaf);
818 	} else {
819 		fi = btrfs_item_ptr(leaf, del_slot - 1,
820 			   struct btrfs_file_extent_item);
821 		btrfs_set_file_extent_type(leaf, fi,
822 					   BTRFS_FILE_EXTENT_REG);
823 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
824 		btrfs_set_file_extent_num_bytes(leaf, fi,
825 						extent_end - key.offset);
826 		btrfs_mark_buffer_dirty(trans, leaf);
827 
828 		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
829 		if (ret < 0) {
830 			btrfs_abort_transaction(trans, ret);
831 			goto out;
832 		}
833 	}
834 out:
835 	btrfs_free_path(path);
836 	return ret;
837 }
838 
839 /*
840  * On error we return an unlocked page and the error value;
841  * on success we return a locked page and 0.
842  */
843 static int prepare_uptodate_page(struct inode *inode,
844 				 struct page *page, u64 pos,
845 				 bool force_uptodate)
846 {
847 	struct folio *folio = page_folio(page);
848 	int ret = 0;
849 
850 	if (((pos & (PAGE_SIZE - 1)) || force_uptodate) &&
851 	    !PageUptodate(page)) {
852 		ret = btrfs_read_folio(NULL, folio);
853 		if (ret)
854 			return ret;
855 		lock_page(page);
856 		if (!PageUptodate(page)) {
857 			unlock_page(page);
858 			return -EIO;
859 		}
860 
861 		/*
862 		 * Since btrfs_read_folio() will unlock the folio before it
863 		 * returns, there is a window where btrfs_release_folio() can be
864 		 * called to release the page.  Here we check both inode
865 		 * mapping and PagePrivate() to make sure the page was not
866 		 * released.
867 		 *
868 		 * The private flag check is essential for subpage as we need
869 		 * to store extra bitmap using page->private.
870 		 */
871 		if (page->mapping != inode->i_mapping || !PagePrivate(page)) {
872 			unlock_page(page);
873 			return -EAGAIN;
874 		}
875 	}
876 	return 0;
877 }
878 
879 static fgf_t get_prepare_fgp_flags(bool nowait)
880 {
881 	fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
882 
883 	if (nowait)
884 		fgp_flags |= FGP_NOWAIT;
885 
886 	return fgp_flags;
887 }
888 
889 static gfp_t get_prepare_gfp_flags(struct inode *inode, bool nowait)
890 {
891 	gfp_t gfp;
892 
893 	gfp = btrfs_alloc_write_mask(inode->i_mapping);
894 	if (nowait) {
895 		gfp &= ~__GFP_DIRECT_RECLAIM;
896 		gfp |= GFP_NOWAIT;
897 	}
898 
899 	return gfp;
900 }
901 
902 /*
903  * This just gets pages into the page cache and locks them down.
904  */
905 static noinline int prepare_pages(struct inode *inode, struct page **pages,
906 				  size_t num_pages, loff_t pos,
907 				  size_t write_bytes, bool force_uptodate,
908 				  bool nowait)
909 {
910 	int i;
911 	unsigned long index = pos >> PAGE_SHIFT;
912 	gfp_t mask = get_prepare_gfp_flags(inode, nowait);
913 	fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
914 	int err = 0;
915 	int faili;
916 
917 	for (i = 0; i < num_pages; i++) {
918 again:
919 		pages[i] = pagecache_get_page(inode->i_mapping, index + i,
920 					      fgp_flags, mask | __GFP_WRITE);
921 		if (!pages[i]) {
922 			faili = i - 1;
923 			if (nowait)
924 				err = -EAGAIN;
925 			else
926 				err = -ENOMEM;
927 			goto fail;
928 		}
929 
930 		err = set_page_extent_mapped(pages[i]);
931 		if (err < 0) {
932 			faili = i;
933 			goto fail;
934 		}
935 
936 		if (i == 0)
937 			err = prepare_uptodate_page(inode, pages[i], pos,
938 						    force_uptodate);
939 		if (!err && i == num_pages - 1)
940 			err = prepare_uptodate_page(inode, pages[i],
941 						    pos + write_bytes, false);
942 		if (err) {
943 			put_page(pages[i]);
944 			if (!nowait && err == -EAGAIN) {
945 				err = 0;
946 				goto again;
947 			}
948 			faili = i - 1;
949 			goto fail;
950 		}
951 		wait_on_page_writeback(pages[i]);
952 	}
953 
954 	return 0;
955 fail:
956 	while (faili >= 0) {
957 		unlock_page(pages[faili]);
958 		put_page(pages[faili]);
959 		faili--;
960 	}
961 	return err;
962 
963 }
964 
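/*
 * Editor's note on the error path of prepare_pages() above: 'faili' is the
 * index of the last page that still needs unlock+put. It is i - 1 when
 * pages[i] was never locked (allocation failed) or was already unlocked and
 * released (prepare_uptodate_page() error), and i when pages[i] is still
 * held locked (set_page_extent_mapped() error).
 */
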
965 /*
966  * This function locks the extent and properly waits for data=ordered extents
967  * to finish before allowing the pages to be modified if needed.
968  *
969  * The return value:
970  * 1 - the extent is locked
971  * 0 - the extent is not locked, and everything is OK
972  * -EAGAIN - need re-prepare the pages
973  * any other < 0 value - something went wrong
974  */
975 static noinline int
976 lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
977 				size_t num_pages, loff_t pos,
978 				size_t write_bytes,
979 				u64 *lockstart, u64 *lockend, bool nowait,
980 				struct extent_state **cached_state)
981 {
982 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
983 	u64 start_pos;
984 	u64 last_pos;
985 	int i;
986 	int ret = 0;
987 
988 	start_pos = round_down(pos, fs_info->sectorsize);
989 	last_pos = round_up(pos + write_bytes, fs_info->sectorsize) - 1;
990 
991 	if (start_pos < inode->vfs_inode.i_size) {
992 		struct btrfs_ordered_extent *ordered;
993 
994 		if (nowait) {
995 			if (!try_lock_extent(&inode->io_tree, start_pos, last_pos,
996 					     cached_state)) {
997 				for (i = 0; i < num_pages; i++) {
998 					unlock_page(pages[i]);
999 					put_page(pages[i]);
1000 					pages[i] = NULL;
1001 				}
1002 
1003 				return -EAGAIN;
1004 			}
1005 		} else {
1006 			lock_extent(&inode->io_tree, start_pos, last_pos, cached_state);
1007 		}
1008 
1009 		ordered = btrfs_lookup_ordered_range(inode, start_pos,
1010 						     last_pos - start_pos + 1);
1011 		if (ordered &&
1012 		    ordered->file_offset + ordered->num_bytes > start_pos &&
1013 		    ordered->file_offset <= last_pos) {
1014 			unlock_extent(&inode->io_tree, start_pos, last_pos,
1015 				      cached_state);
1016 			for (i = 0; i < num_pages; i++) {
1017 				unlock_page(pages[i]);
1018 				put_page(pages[i]);
1019 			}
1020 			btrfs_start_ordered_extent(ordered);
1021 			btrfs_put_ordered_extent(ordered);
1022 			return -EAGAIN;
1023 		}
1024 		if (ordered)
1025 			btrfs_put_ordered_extent(ordered);
1026 
1027 		*lockstart = start_pos;
1028 		*lockend = last_pos;
1029 		ret = 1;
1030 	}
1031 
1032 	/*
1033 	 * We should be called after prepare_pages() which should have locked
1034 	 * all pages in the range.
1035 	 */
1036 	for (i = 0; i < num_pages; i++)
1037 		WARN_ON(!PageLocked(pages[i]));
1038 
1039 	return ret;
1040 }
1041 
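/*
 * Editor's note: a condensed sketch of the caller-side handling of the
 * tri-state return value above, adapted from btrfs_buffered_write() below:
 *
 *	extents_locked = lock_and_cleanup_extent_if_need(inode, pages,
 *			num_pages, pos, write_bytes, &lockstart, &lockend,
 *			nowait, &cached_state);
 *	if (extents_locked < 0) {
 *		if (!nowait && extents_locked == -EAGAIN)
 *			goto again;	(the pages must be re-prepared)
 *		ret = extents_locked;	(hard error, bail out)
 *	}
 *	... copy the data ...
 *	if (extents_locked)	(== 1, we hold the extent lock)
 *		unlock_extent(&inode->io_tree, lockstart, lockend,
 *			      &cached_state);
 */
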
1042 /*
1043  * Check if we can do nocow write into the range [@pos, @pos + @write_bytes)
1044  *
1045  * @pos:         File offset.
1046  * @write_bytes: The length to write, will be updated to the nocow writeable
1047  *               range.
1048  *
1049  * This function will flush ordered extents in the range to ensure proper
1050  * nocow checks.
1051  *
1052  * Return:
1053  * > 0          If we can nocow, and updates @write_bytes.
1054  *  0           If we can't do a nocow write.
1055  * -EAGAIN      If we can't do a nocow write because snapshotting of the inode's
1056  *              root is in progress.
1057  * < 0          If an error happened.
1058  *
1059  * NOTE: Callers need to call btrfs_check_nocow_unlock() if we return > 0.
1060  */
1061 int btrfs_check_nocow_lock(struct btrfs_inode *inode, loff_t pos,
1062 			   size_t *write_bytes, bool nowait)
1063 {
1064 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1065 	struct btrfs_root *root = inode->root;
1066 	struct extent_state *cached_state = NULL;
1067 	u64 lockstart, lockend;
1068 	u64 num_bytes;
1069 	int ret;
1070 
1071 	if (!(inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1072 		return 0;
1073 
1074 	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
1075 		return -EAGAIN;
1076 
1077 	lockstart = round_down(pos, fs_info->sectorsize);
1078 	lockend = round_up(pos + *write_bytes,
1079 			   fs_info->sectorsize) - 1;
1080 	num_bytes = lockend - lockstart + 1;
1081 
1082 	if (nowait) {
1083 		if (!btrfs_try_lock_ordered_range(inode, lockstart, lockend,
1084 						  &cached_state)) {
1085 			btrfs_drew_write_unlock(&root->snapshot_lock);
1086 			return -EAGAIN;
1087 		}
1088 	} else {
1089 		btrfs_lock_and_flush_ordered_range(inode, lockstart, lockend,
1090 						   &cached_state);
1091 	}
1092 	ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
1093 			NULL, NULL, NULL, nowait, false);
1094 	if (ret <= 0)
1095 		btrfs_drew_write_unlock(&root->snapshot_lock);
1096 	else
1097 		*write_bytes = min_t(size_t, *write_bytes,
1098 				     num_bytes - pos + lockstart);
1099 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
1100 
1101 	return ret;
1102 }
1103 
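/*
 * Editor's note: a condensed sketch of how the buffered write path below
 * consumes btrfs_check_nocow_lock():
 *
 *	can_nocow = btrfs_check_nocow_lock(inode, pos, &write_bytes, nowait);
 *	if (can_nocow > 0)
 *		only_release_metadata = true;	(skip data space reservation)
 *	...
 *	if (only_release_metadata)
 *		btrfs_check_nocow_unlock(inode);	(per the NOTE above)
 */
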
1104 void btrfs_check_nocow_unlock(struct btrfs_inode *inode)
1105 {
1106 	btrfs_drew_write_unlock(&inode->root->snapshot_lock);
1107 }
1108 
1109 static void update_time_for_write(struct inode *inode)
1110 {
1111 	struct timespec64 now, ctime;
1112 
1113 	if (IS_NOCMTIME(inode))
1114 		return;
1115 
1116 	now = current_time(inode);
1117 	if (!timespec64_equal(&inode->i_mtime, &now))
1118 		inode->i_mtime = now;
1119 
1120 	ctime = inode_get_ctime(inode);
1121 	if (!timespec64_equal(&ctime, &now))
1122 		inode_set_ctime_to_ts(inode, now);
1123 
1124 	if (IS_I_VERSION(inode))
1125 		inode_inc_iversion(inode);
1126 }
1127 
1128 static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from,
1129 			     size_t count)
1130 {
1131 	struct file *file = iocb->ki_filp;
1132 	struct inode *inode = file_inode(file);
1133 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1134 	loff_t pos = iocb->ki_pos;
1135 	int ret;
1136 	loff_t oldsize;
1137 
1138 	/*
1139 	 * Quickly bail out on NOWAIT writes if we don't have the nodatacow or
1140 	 * prealloc flags, as without those flags we always have to COW. We will
1141 	 * later check if we can really NOCOW into the target range (using
1142 	 * can_nocow_extent() at btrfs_get_blocks_direct_write()).
1143 	 */
1144 	if ((iocb->ki_flags & IOCB_NOWAIT) &&
1145 	    !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)))
1146 		return -EAGAIN;
1147 
1148 	ret = file_remove_privs(file);
1149 	if (ret)
1150 		return ret;
1151 
1152 	/*
1153 	 * We reserve space for updating the inode when we reserve space for the
1154 	 * extent we are going to write, so we will enospc out there.  We don't
1155 	 * need to start yet another transaction to update the inode as we will
1156 	 * update the inode when we finish writing whatever data we write.
1157 	 */
1158 	update_time_for_write(inode);
1159 
1160 	oldsize = i_size_read(inode);
1161 	if (pos > oldsize) {
1162 		/* Expand hole size to cover write data, preventing empty gap */
1163 		loff_t end_pos = round_up(pos + count, fs_info->sectorsize);
1164 
1165 		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos);
1166 		if (ret)
1167 			return ret;
1168 	}
1169 
1170 	return 0;
1171 }
1172 
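/*
 * Editor's note: a worked example (hypothetical values) of the EOF hole
 * expansion in btrfs_write_check() above, assuming a 4K sectorsize:
 *
 *	oldsize = 10000, pos = 20000, count = 1000
 *	end_pos = round_up(20000 + 1000, 4096) = 24576
 *
 * so btrfs_cont_expand() fills the gap [10000, 24576) before the write.
 */
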
1173 static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
1174 					       struct iov_iter *i)
1175 {
1176 	struct file *file = iocb->ki_filp;
1177 	loff_t pos;
1178 	struct inode *inode = file_inode(file);
1179 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1180 	struct page **pages = NULL;
1181 	struct extent_changeset *data_reserved = NULL;
1182 	u64 release_bytes = 0;
1183 	u64 lockstart;
1184 	u64 lockend;
1185 	size_t num_written = 0;
1186 	int nrptrs;
1187 	ssize_t ret;
1188 	bool only_release_metadata = false;
1189 	bool force_page_uptodate = false;
1190 	loff_t old_isize = i_size_read(inode);
1191 	unsigned int ilock_flags = 0;
1192 	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
1193 	unsigned int bdp_flags = (nowait ? BDP_ASYNC : 0);
1194 
1195 	if (nowait)
1196 		ilock_flags |= BTRFS_ILOCK_TRY;
1197 
1198 	ret = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
1199 	if (ret < 0)
1200 		return ret;
1201 
1202 	ret = generic_write_checks(iocb, i);
1203 	if (ret <= 0)
1204 		goto out;
1205 
1206 	ret = btrfs_write_check(iocb, i, ret);
1207 	if (ret < 0)
1208 		goto out;
1209 
1210 	pos = iocb->ki_pos;
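	/*
	 * Editor's note: size the pages array to cover the whole iov, capped
	 * at one page worth of pointers (512 entries with 4K pages and 8 byte
	 * pointers) and at the task's remaining dirty-page budget, but never
	 * below 8 entries.
	 */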
1211 	nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE),
1212 			PAGE_SIZE / (sizeof(struct page *)));
1213 	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
1214 	nrptrs = max(nrptrs, 8);
1215 	pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL);
1216 	if (!pages) {
1217 		ret = -ENOMEM;
1218 		goto out;
1219 	}
1220 
1221 	while (iov_iter_count(i) > 0) {
1222 		struct extent_state *cached_state = NULL;
1223 		size_t offset = offset_in_page(pos);
1224 		size_t sector_offset;
1225 		size_t write_bytes = min(iov_iter_count(i),
1226 					 nrptrs * (size_t)PAGE_SIZE -
1227 					 offset);
1228 		size_t num_pages;
1229 		size_t reserve_bytes;
1230 		size_t dirty_pages;
1231 		size_t copied;
1232 		size_t dirty_sectors;
1233 		size_t num_sectors;
1234 		int extents_locked;
1235 
1236 		/*
1237 		 * Fault pages before locking them in prepare_pages
1238 		 * to avoid taking a recursive lock
1239 		 */
1240 		if (unlikely(fault_in_iov_iter_readable(i, write_bytes))) {
1241 			ret = -EFAULT;
1242 			break;
1243 		}
1244 
1245 		only_release_metadata = false;
1246 		sector_offset = pos & (fs_info->sectorsize - 1);
1247 
1248 		extent_changeset_release(data_reserved);
1249 		ret = btrfs_check_data_free_space(BTRFS_I(inode),
1250 						  &data_reserved, pos,
1251 						  write_bytes, nowait);
1252 		if (ret < 0) {
1253 			int can_nocow;
1254 
1255 			if (nowait && (ret == -ENOSPC || ret == -EAGAIN)) {
1256 				ret = -EAGAIN;
1257 				break;
1258 			}
1259 
1260 			/*
1261 			 * If we don't have to COW at the offset, reserve
1262 			 * metadata only. write_bytes may get smaller than
1263 			 * requested here.
1264 			 */
1265 			can_nocow = btrfs_check_nocow_lock(BTRFS_I(inode), pos,
1266 							   &write_bytes, nowait);
1267 			if (can_nocow < 0)
1268 				ret = can_nocow;
1269 			if (can_nocow > 0)
1270 				ret = 0;
1271 			if (ret)
1272 				break;
1273 			only_release_metadata = true;
1274 		}
1275 
1276 		num_pages = DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
1277 		WARN_ON(num_pages > nrptrs);
1278 		reserve_bytes = round_up(write_bytes + sector_offset,
1279 					 fs_info->sectorsize);
1280 		WARN_ON(reserve_bytes == 0);
1281 		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
1282 						      reserve_bytes,
1283 						      reserve_bytes, nowait);
1284 		if (ret) {
1285 			if (!only_release_metadata)
1286 				btrfs_free_reserved_data_space(BTRFS_I(inode),
1287 						data_reserved, pos,
1288 						write_bytes);
1289 			else
1290 				btrfs_check_nocow_unlock(BTRFS_I(inode));
1291 
1292 			if (nowait && ret == -ENOSPC)
1293 				ret = -EAGAIN;
1294 			break;
1295 		}
1296 
1297 		release_bytes = reserve_bytes;
1298 again:
1299 		ret = balance_dirty_pages_ratelimited_flags(inode->i_mapping, bdp_flags);
1300 		if (ret) {
1301 			btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1302 			break;
1303 		}
1304 
1305 		/*
1306 		 * This is going to setup the pages array with the number of
1307 		 * pages we want, so we don't really need to worry about the
1308 		 * contents of pages from loop to loop
1309 		 */
1310 		ret = prepare_pages(inode, pages, num_pages,
1311 				    pos, write_bytes, force_page_uptodate, false);
1312 		if (ret) {
1313 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1314 						       reserve_bytes);
1315 			break;
1316 		}
1317 
1318 		extents_locked = lock_and_cleanup_extent_if_need(
1319 				BTRFS_I(inode), pages,
1320 				num_pages, pos, write_bytes, &lockstart,
1321 				&lockend, nowait, &cached_state);
1322 		if (extents_locked < 0) {
1323 			if (!nowait && extents_locked == -EAGAIN)
1324 				goto again;
1325 
1326 			btrfs_delalloc_release_extents(BTRFS_I(inode),
1327 						       reserve_bytes);
1328 			ret = extents_locked;
1329 			break;
1330 		}
1331 
1332 		copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
1333 
1334 		num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
1335 		dirty_sectors = round_up(copied + sector_offset,
1336 					fs_info->sectorsize);
1337 		dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
1338 
1339 		/*
1340 		 * if we have trouble faulting in the pages, fall
1341 		 * back to one page at a time
1342 		 */
1343 		if (copied < write_bytes)
1344 			nrptrs = 1;
1345 
1346 		if (copied == 0) {
1347 			force_page_uptodate = true;
1348 			dirty_sectors = 0;
1349 			dirty_pages = 0;
1350 		} else {
1351 			force_page_uptodate = false;
1352 			dirty_pages = DIV_ROUND_UP(copied + offset,
1353 						   PAGE_SIZE);
1354 		}
1355 
1356 		if (num_sectors > dirty_sectors) {
1357 			/* release everything except the sectors we dirtied */
1358 			release_bytes -= dirty_sectors << fs_info->sectorsize_bits;
1359 			if (only_release_metadata) {
1360 				btrfs_delalloc_release_metadata(BTRFS_I(inode),
1361 							release_bytes, true);
1362 			} else {
1363 				u64 __pos;
1364 
1365 				__pos = round_down(pos,
1366 						   fs_info->sectorsize) +
1367 					(dirty_pages << PAGE_SHIFT);
1368 				btrfs_delalloc_release_space(BTRFS_I(inode),
1369 						data_reserved, __pos,
1370 						release_bytes, true);
1371 			}
1372 		}
1373 
1374 		release_bytes = round_up(copied + sector_offset,
1375 					fs_info->sectorsize);
1376 
1377 		ret = btrfs_dirty_pages(BTRFS_I(inode), pages,
1378 					dirty_pages, pos, copied,
1379 					&cached_state, only_release_metadata);
1380 
1381 		/*
1382 		 * If we have not locked the extent range, because the range's
1383 		 * start offset is >= i_size, we might still have a non-NULL
1384 		 * cached extent state, acquired while marking the extent range
1385 		 * as delalloc through btrfs_dirty_pages(). Therefore free any
1386 		 * possible cached extent state to avoid a memory leak.
1387 		 */
1388 		if (extents_locked)
1389 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
1390 				      lockend, &cached_state);
1391 		else
1392 			free_extent_state(cached_state);
1393 
1394 		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
1395 		if (ret) {
1396 			btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1397 			break;
1398 		}
1399 
1400 		release_bytes = 0;
1401 		if (only_release_metadata)
1402 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1403 
1404 		btrfs_drop_pages(fs_info, pages, num_pages, pos, copied);
1405 
1406 		cond_resched();
1407 
1408 		pos += copied;
1409 		num_written += copied;
1410 	}
1411 
1412 	kfree(pages);
1413 
1414 	if (release_bytes) {
1415 		if (only_release_metadata) {
1416 			btrfs_check_nocow_unlock(BTRFS_I(inode));
1417 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
1418 					release_bytes, true);
1419 		} else {
1420 			btrfs_delalloc_release_space(BTRFS_I(inode),
1421 					data_reserved,
1422 					round_down(pos, fs_info->sectorsize),
1423 					release_bytes, true);
1424 		}
1425 	}
1426 
1427 	extent_changeset_free(data_reserved);
1428 	if (num_written > 0) {
1429 		pagecache_isize_extended(inode, old_isize, iocb->ki_pos);
1430 		iocb->ki_pos += num_written;
1431 	}
1432 out:
1433 	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1434 	return num_written ? num_written : ret;
1435 }
1436 
1437 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
1438 			       const struct iov_iter *iter, loff_t offset)
1439 {
1440 	const u32 blocksize_mask = fs_info->sectorsize - 1;
1441 
1442 	if (offset & blocksize_mask)
1443 		return -EINVAL;
1444 
1445 	if (iov_iter_alignment(iter) & blocksize_mask)
1446 		return -EINVAL;
1447 
1448 	return 0;
1449 }
1450 
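/*
 * Editor's note: with a 4K sectorsize, check_direct_IO() above rejects, for
 * example, offset = 512 (512 & 4095 != 0) and accepts offset = 8192 when
 * every iov segment's base address and length are also 4K aligned, since
 * iov_iter_alignment() ORs together all segment addresses and lengths.
 */
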
1451 static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
1452 {
1453 	struct file *file = iocb->ki_filp;
1454 	struct inode *inode = file_inode(file);
1455 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1456 	loff_t pos;
1457 	ssize_t written = 0;
1458 	ssize_t written_buffered;
1459 	size_t prev_left = 0;
1460 	loff_t endbyte;
1461 	ssize_t err;
1462 	unsigned int ilock_flags = 0;
1463 	struct iomap_dio *dio;
1464 
1465 	if (iocb->ki_flags & IOCB_NOWAIT)
1466 		ilock_flags |= BTRFS_ILOCK_TRY;
1467 
1468 	/*
1469 	 * If the write DIO is within EOF, use a shared lock, but only if the
1470 	 * security bits will likely not be dropped by file_remove_privs() called
1471 	 * from btrfs_write_check(). Both conditions need to be rechecked after
1472 	 * the lock has been acquired.
1473 	 */
1474 	if (iocb->ki_pos + iov_iter_count(from) <= i_size_read(inode) && IS_NOSEC(inode))
1475 		ilock_flags |= BTRFS_ILOCK_SHARED;
1476 
1477 relock:
1478 	err = btrfs_inode_lock(BTRFS_I(inode), ilock_flags);
1479 	if (err < 0)
1480 		return err;
1481 
1482 	/* Shared lock cannot be used with security bits set. */
1483 	if ((ilock_flags & BTRFS_ILOCK_SHARED) && !IS_NOSEC(inode)) {
1484 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1485 		ilock_flags &= ~BTRFS_ILOCK_SHARED;
1486 		goto relock;
1487 	}
1488 
1489 	err = generic_write_checks(iocb, from);
1490 	if (err <= 0) {
1491 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1492 		return err;
1493 	}
1494 
1495 	err = btrfs_write_check(iocb, from, err);
1496 	if (err < 0) {
1497 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1498 		goto out;
1499 	}
1500 
1501 	pos = iocb->ki_pos;
1502 	/*
1503 	 * Re-check since file size may have changed just before taking the
1504 	 * lock or pos may have changed because of O_APPEND in generic_write_checks()
1505 	 */
1506 	if ((ilock_flags & BTRFS_ILOCK_SHARED) &&
1507 	    pos + iov_iter_count(from) > i_size_read(inode)) {
1508 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1509 		ilock_flags &= ~BTRFS_ILOCK_SHARED;
1510 		goto relock;
1511 	}
1512 
1513 	if (check_direct_IO(fs_info, from, pos)) {
1514 		btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1515 		goto buffered;
1516 	}
1517 
1518 	/*
1519 	 * The iov_iter can be mapped to the same file range we are writing to.
1520 	 * If that's the case, then we will deadlock in the iomap code, because
1521 	 * it first calls our callback btrfs_dio_iomap_begin(), which will create
1522 	 * an ordered extent, and after that it will fault in the pages that the
1523 	 * iov_iter refers to. During the fault in we end up in the readahead
1524 	 * pages code (starting at btrfs_readahead()), which will lock the range,
1525 	 * find that ordered extent and then wait for it to complete (at
1526 	 * btrfs_lock_and_flush_ordered_range()), resulting in a deadlock since
1527 	 * obviously the ordered extent can never complete as we didn't submit
1528 	 * yet the respective bio(s). This always happens when the buffer is
1529 	 * memory mapped to the same file range, since the iomap DIO code always
1530 	 * invalidates pages in the target file range (after starting and waiting
1531 	 * for any writeback).
1532 	 *
1533 	 * So here we disable page faults in the iov_iter and then retry if we
1534 	 * got -EFAULT, faulting in the pages before the retry.
1535 	 */
1536 again:
1537 	from->nofault = true;
1538 	dio = btrfs_dio_write(iocb, from, written);
1539 	from->nofault = false;
1540 
1541 	if (IS_ERR_OR_NULL(dio)) {
1542 		err = PTR_ERR_OR_ZERO(dio);
1543 	} else {
1544 		/*
1545 		 * If we have a synchronous write, we must make sure the fsync
1546 		 * triggered by the iomap_dio_complete() call below doesn't
1547 		 * deadlock on the inode lock - we are already holding it and we
1548 		 * can't call it after unlocking because we may need to complete
1549 		 * partial writes due to the input buffer (or parts of it) not
1550 		 * being already faulted in.
1551 		 */
1552 		ASSERT(current->journal_info == NULL);
1553 		current->journal_info = BTRFS_TRANS_DIO_WRITE_STUB;
1554 		err = iomap_dio_complete(dio);
1555 		current->journal_info = NULL;
1556 	}
1557 
1558 	/* No increment (+=) because iomap returns a cumulative value. */
1559 	if (err > 0)
1560 		written = err;
1561 
1562 	if (iov_iter_count(from) > 0 && (err == -EFAULT || err > 0)) {
1563 		const size_t left = iov_iter_count(from);
1564 		/*
1565 		 * We have more data left to write. Try to fault in as many as
1566 		 * possible of the remainder pages and retry. We do this without
1567 		 * releasing and locking again the inode, to prevent races with
1568 		 * truncate.
1569 		 *
1570 		 * Also, in case the iov refers to pages in the file range of the
1571 		 * file we want to write to (due to a mmap), we could enter an
1572 		 * infinite loop if we retry after faulting the pages in, since
1573 		 * iomap will invalidate any pages in the range early on, before
1574 		 * it tries to fault in the pages of the iov. So we keep track of
1575 		 * how much was left of the iov in the previous EFAULT, and fall back
1576 		 * to buffered IO in case we haven't made any progress.
1577 		 */
1578 		if (left == prev_left) {
1579 			err = -ENOTBLK;
1580 		} else {
1581 			fault_in_iov_iter_readable(from, left);
1582 			prev_left = left;
1583 			goto again;
1584 		}
1585 	}
1586 
1587 	btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
1588 
1589 	/*
1590 	 * If 'err' is -ENOTBLK or we have not written all data, then it means
1591 	 * we must fall back to buffered IO.
1592 	 */
1593 	if ((err < 0 && err != -ENOTBLK) || !iov_iter_count(from))
1594 		goto out;
1595 
1596 buffered:
1597 	/*
1598 	 * If we are in a NOWAIT context, then return -EAGAIN to signal the caller
1599 	 * it must retry the operation in a context where blocking is acceptable,
1600 	 * because even if we end up not blocking during the buffered IO attempt
1601 	 * below, we will block when flushing and waiting for the IO.
1602 	 */
1603 	if (iocb->ki_flags & IOCB_NOWAIT) {
1604 		err = -EAGAIN;
1605 		goto out;
1606 	}
1607 
1608 	pos = iocb->ki_pos;
1609 	written_buffered = btrfs_buffered_write(iocb, from);
1610 	if (written_buffered < 0) {
1611 		err = written_buffered;
1612 		goto out;
1613 	}
1614 	/*
1615 	 * Ensure all data is persisted. We want the next direct IO read to be
1616 	 * able to read what was just written.
1617 	 */
1618 	endbyte = pos + written_buffered - 1;
1619 	err = btrfs_fdatawrite_range(inode, pos, endbyte);
1620 	if (err)
1621 		goto out;
1622 	err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1623 	if (err)
1624 		goto out;
1625 	written += written_buffered;
1626 	iocb->ki_pos = pos + written_buffered;
1627 	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT,
1628 				 endbyte >> PAGE_SHIFT);
1629 out:
1630 	return err < 0 ? err : written;
1631 }
1632 
1633 static ssize_t btrfs_encoded_write(struct kiocb *iocb, struct iov_iter *from,
1634 			const struct btrfs_ioctl_encoded_io_args *encoded)
1635 {
1636 	struct file *file = iocb->ki_filp;
1637 	struct inode *inode = file_inode(file);
1638 	loff_t count;
1639 	ssize_t ret;
1640 
1641 	btrfs_inode_lock(BTRFS_I(inode), 0);
1642 	count = encoded->len;
1643 	ret = generic_write_checks_count(iocb, &count);
1644 	if (ret == 0 && count != encoded->len) {
1645 		/*
1646 		 * The write got truncated by generic_write_checks_count(). We
1647 		 * can't do a partial encoded write.
1648 		 */
1649 		ret = -EFBIG;
1650 	}
1651 	if (ret || encoded->len == 0)
1652 		goto out;
1653 
1654 	ret = btrfs_write_check(iocb, from, encoded->len);
1655 	if (ret < 0)
1656 		goto out;
1657 
1658 	ret = btrfs_do_encoded_write(iocb, from, encoded);
1659 out:
1660 	btrfs_inode_unlock(BTRFS_I(inode), 0);
1661 	return ret;
1662 }
1663 
1664 ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from,
1665 			    const struct btrfs_ioctl_encoded_io_args *encoded)
1666 {
1667 	struct file *file = iocb->ki_filp;
1668 	struct btrfs_inode *inode = BTRFS_I(file_inode(file));
1669 	ssize_t num_written, num_sync;
1670 
1671 	/*
1672 	 * If the fs flips readonly due to some impossible error, although we
1673 	 * have opened a file as writable, we have to stop this write operation
1674 	 * to ensure consistency.
1675 	 */
1676 	if (BTRFS_FS_ERROR(inode->root->fs_info))
1677 		return -EROFS;
1678 
1679 	if (encoded && (iocb->ki_flags & IOCB_NOWAIT))
1680 		return -EOPNOTSUPP;
1681 
1682 	if (encoded) {
1683 		num_written = btrfs_encoded_write(iocb, from, encoded);
1684 		num_sync = encoded->len;
1685 	} else if (iocb->ki_flags & IOCB_DIRECT) {
1686 		num_written = btrfs_direct_write(iocb, from);
1687 		num_sync = num_written;
1688 	} else {
1689 		num_written = btrfs_buffered_write(iocb, from);
1690 		num_sync = num_written;
1691 	}
1692 
1693 	btrfs_set_inode_last_sub_trans(inode);
1694 
1695 	if (num_sync > 0) {
1696 		num_sync = generic_write_sync(iocb, num_sync);
1697 		if (num_sync < 0)
1698 			num_written = num_sync;
1699 	}
1700 
1701 	return num_written;
1702 }
1703 
1704 static ssize_t btrfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1705 {
1706 	return btrfs_do_write_iter(iocb, from, NULL);
1707 }
1708 
1709 int btrfs_release_file(struct inode *inode, struct file *filp)
1710 {
1711 	struct btrfs_file_private *private = filp->private_data;
1712 
1713 	if (private) {
1714 		kfree(private->filldir_buf);
1715 		free_extent_state(private->llseek_cached_state);
1716 		kfree(private);
1717 		filp->private_data = NULL;
1718 	}
1719 
1720 	/*
1721 	 * Set by setattr when we are about to truncate a file from a non-zero
1722 	 * size to a zero size.  This tries to flush down new bytes that may
1723 	 * have been written if the application were using truncate to replace
1724 	 * a file in place.
1725 	 */
1726 	if (test_and_clear_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
1727 			       &BTRFS_I(inode)->runtime_flags))
1728 		filemap_flush(inode->i_mapping);
1729 	return 0;
1730 }
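
/*
 * Illustrative sketch (userspace, not part of this file): the
 * truncate-to-replace pattern that BTRFS_INODE_FLUSH_ON_CLOSE targets
 * looks roughly like:
 *
 *	fd = open("app.conf", O_WRONLY | O_TRUNC);	// truncate in place
 *	write(fd, new_contents, len);
 *	close(fd);	// filemap_flush() above pushes the new bytes out
 */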
1731 
1732 static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
1733 {
1734 	int ret;
1735 	struct blk_plug plug;
1736 
1737 	/*
1738 	 * This is only called from fsync, which does synchronous writes, so a
1739 	 * plug can merge adjacent IOs as much as possible.  Especially with
1740 	 * multiple disks using a RAID profile, a large IO can be split into
1741 	 * several stripe-length segments (currently 64K).
1742 	 */
1743 	blk_start_plug(&plug);
1744 	ret = btrfs_fdatawrite_range(inode, start, end);
1745 	blk_finish_plug(&plug);
1746 
1747 	return ret;
1748 }
1749 
1750 static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
1751 {
1752 	struct btrfs_inode *inode = BTRFS_I(ctx->inode);
1753 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
1754 
1755 	if (btrfs_inode_in_log(inode, fs_info->generation) &&
1756 	    list_empty(&ctx->ordered_extents))
1757 		return true;
1758 
1759 	/*
1760 	 * If we are doing a fast fsync we cannot bail out just because the
1761 	 * inode's last_trans is less than or equal to the last committed
1762 	 * transaction, because we only update the inode's last_trans during
1763 	 * ordered extent completion, and for a fast fsync we don't wait for
1764 	 * that, we only wait for the writeback to complete.
1765 	 */
1766 	if (inode->last_trans <= fs_info->last_trans_committed &&
1767 	    (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) ||
1768 	     list_empty(&ctx->ordered_extents)))
1769 		return true;
1770 
1771 	return false;
1772 }
1773 
1774 /*
1775  * fsync call for both files and directories.  This logs the inode into
1776  * the tree log instead of forcing full commits whenever possible.
1777  *
1778  * It needs to call filemap_fdatawait so that all ordered extent updates
1779  * in the metadata btree are up to date for copying to the log.
1780  *
1781  * It drops the inode mutex before doing the tree log commit.  This is an
1782  * important optimization for directories because holding the mutex prevents
1783  * new operations on the dir while we write to disk.
1784  */
1785 int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1786 {
1787 	struct dentry *dentry = file_dentry(file);
1788 	struct inode *inode = d_inode(dentry);
1789 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
1790 	struct btrfs_root *root = BTRFS_I(inode)->root;
1791 	struct btrfs_trans_handle *trans;
1792 	struct btrfs_log_ctx ctx;
1793 	int ret = 0, err;
1794 	u64 len;
1795 	bool full_sync;
1796 	bool skip_ilock = false;
1797 
1798 	if (current->journal_info == BTRFS_TRANS_DIO_WRITE_STUB) {
1799 		skip_ilock = true;
1800 		current->journal_info = NULL;
1801 		lockdep_assert_held(&inode->i_rwsem);
1802 	}
1803 
1804 	trace_btrfs_sync_file(file, datasync);
1805 
1806 	btrfs_init_log_ctx(&ctx, inode);
1807 
1808 	/*
1809 	 * Always set the range to a full range, otherwise we can get into
1810 	 * several problems, from missing file extent items to represent holes
1811 	 * when not using the NO_HOLES feature, to log tree corruption due to
1812 	 * races between hole detection during logging and completion of ordered
1813 	 * extents outside the range, to missing checksums due to ordered extents
1814 	 * for which we flushed only a subset of their pages.
1815 	 */
1816 	start = 0;
1817 	end = LLONG_MAX;
1818 	len = (u64)LLONG_MAX + 1;
1819 
1820 	/*
1821 	 * We write the dirty pages in the range and wait for them to complete
1822 	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
1823 	 * pages concurrently and improve the performance.  See
1824 	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
1825 	 */
1826 	ret = start_ordered_ops(inode, start, end);
1827 	if (ret)
1828 		goto out;
1829 
1830 	if (skip_ilock)
1831 		down_write(&BTRFS_I(inode)->i_mmap_lock);
1832 	else
1833 		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1834 
1835 	atomic_inc(&root->log_batch);
1836 
1837 	/*
1838 	 * Before we acquired the inode's lock and the mmap lock, someone may
1839 	 * have dirtied more pages in the target range. We need to make sure
1840 	 * that writeback for any such pages does not start while we are logging
1841 	 * the inode, because if it does, any of the following might happen when
1842 	 * we are not doing a full inode sync:
1843 	 *
1844 	 * 1) We log an extent after its writeback finishes but before its
1845 	 *    checksums are added to the csum tree, leading to -EIO errors
1846 	 *    when attempting to read the extent after a log replay.
1847 	 *
1848 	 * 2) We can end up logging an extent before its writeback finishes.
1849 	 *    Therefore after the log replay we will have a file extent item
1850 	 *    pointing to an unwritten extent (and no data checksums as well).
1851 	 *
1852 	 * So trigger writeback for any eventual new dirty pages and then we
1853 	 * wait for all ordered extents to complete below.
1854 	 */
1855 	ret = start_ordered_ops(inode, start, end);
1856 	if (ret) {
1857 		if (skip_ilock)
1858 			up_write(&BTRFS_I(inode)->i_mmap_lock);
1859 		else
1860 			btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1861 		goto out;
1862 	}
1863 
1864 	/*
1865 	 * Always check for the full sync flag while holding the inode's lock,
1866 	 * to avoid races with other tasks. The flag must be either set all the
1867 	 * time during logging or always off all the time while logging.
1868 	 * We check the flag here after starting delalloc above, because when
1869 	 * running delalloc the full sync flag may be set if we need to drop
1870 	 * extra extent map ranges due to temporary memory allocation failures.
1871 	 */
1872 	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1873 			     &BTRFS_I(inode)->runtime_flags);
1874 
1875 	/*
1876 	 * We have to do this here to avoid the priority inversion of waiting on
1877 	 * IO of a lower priority task while holding a transaction open.
1878 	 *
1879 	 * For a full fsync we wait for the ordered extents to complete, while
1880 	 * for a fast fsync we wait just for writeback to complete, and then
1881 	 * attach the ordered extents to the transaction so that a transaction
1882 	 * commit waits for their completion.  This avoids data loss if, after
1883 	 * we fsync, the current transaction commits before the ordered extents
1884 	 * complete and a power failure happens right after that.
1885 	 *
1886 	 * For a zoned filesystem, if a write IO uses a ZONE_APPEND command, the
1887 	 * logical address recorded in the ordered extent may change. We need
1888 	 * to wait for the IO to stabilize the logical address.
1889 	 */
1890 	if (full_sync || btrfs_is_zoned(fs_info)) {
1891 		ret = btrfs_wait_ordered_range(inode, start, len);
1892 	} else {
1893 		/*
1894 		 * Get our ordered extents as soon as possible to avoid doing
1895 		 * checksum lookups in the csum tree, and use instead the
1896 		 * checksums attached to the ordered extents.
1897 		 */
1898 		btrfs_get_ordered_extents_for_logging(BTRFS_I(inode),
1899 						      &ctx.ordered_extents);
1900 		ret = filemap_fdatawait_range(inode->i_mapping, start, end);
1901 	}
1902 
1903 	if (ret)
1904 		goto out_release_extents;
1905 
1906 	atomic_inc(&root->log_batch);
1907 
1908 	smp_mb();
1909 	if (skip_inode_logging(&ctx)) {
1910 		/*
1911 		 * We've had everything committed since the last time we were
1912 		 * modified, so clear this flag in case it was set for whatever
1913 		 * reason; it's no longer relevant.
1914 		 */
1915 		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1916 			  &BTRFS_I(inode)->runtime_flags);
1917 		/*
1918 		 * An ordered extent might have started before and completed
1919 		 * already with io errors, in which case the inode was not
1920 		 * updated and we end up here. So check the inode's mapping
1921 		 * for any errors that might have happened since we last
1922 		 * called fsync.
1923 		 */
1924 		ret = filemap_check_wb_err(inode->i_mapping, file->f_wb_err);
1925 		goto out_release_extents;
1926 	}
1927 
1928 	/*
1929 	 * We use start here because we will need to wait on the IO to complete
1930 	 * in btrfs_sync_log, which could require joining a transaction (for
1931 	 * example checking cross references in the nocow path).  If we use join
1932 	 * here we could get into a situation where we're waiting on IO to
1933 	 * happen that is blocked on a transaction trying to commit.  With start
1934 	 * we inc the extwriter counter, so we wait for all extwriters to exit
1935 	 * before we start blocking joiners.  This comment is to keep somebody
1936 	 * from thinking they are super smart and changing this to
1937 	 * btrfs_join_transaction *cough*Josef*cough*.
1938 	 */
1939 	trans = btrfs_start_transaction(root, 0);
1940 	if (IS_ERR(trans)) {
1941 		ret = PTR_ERR(trans);
1942 		goto out_release_extents;
1943 	}
1944 	trans->in_fsync = true;
1945 
1946 	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
1947 	btrfs_release_log_ctx_extents(&ctx);
1948 	if (ret < 0) {
1949 		/* Fallthrough and commit/free transaction. */
1950 		ret = BTRFS_LOG_FORCE_COMMIT;
1951 	}
1952 
1953 	/* we've logged all the items and now have a consistent
1954 	 * version of the file in the log.  It is possible that
1955 	 * someone will come in and modify the file, but that's
1956 	 * fine because the log is consistent on disk, and we
1957 	 * have references to all of the file's extents
1958 	 *
1959 	 * It is possible that someone will come in and log the
1960 	 * file again, but that will end up using the synchronization
1961 	 * inside btrfs_sync_log to keep things safe.
1962 	 */
1963 	if (skip_ilock)
1964 		up_write(&BTRFS_I(inode)->i_mmap_lock);
1965 	else
1966 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
1967 
1968 	if (ret == BTRFS_NO_LOG_SYNC) {
1969 		ret = btrfs_end_transaction(trans);
1970 		goto out;
1971 	}
1972 
1973 	/* We successfully logged the inode, attempt to sync the log. */
1974 	if (!ret) {
1975 		ret = btrfs_sync_log(trans, root, &ctx);
1976 		if (!ret) {
1977 			ret = btrfs_end_transaction(trans);
1978 			goto out;
1979 		}
1980 	}
1981 
1982 	/*
1983 	 * At this point we need to commit the transaction because we had
1984 	 * btrfs_need_log_full_commit() or some other error.
1985 	 *
1986 	 * If we didn't do a full sync we have to stop the trans handle, wait on
1987 	 * the ordered extents, start it again and commit the transaction.  If
1988 	 * we attempt to wait on the ordered extents here we could deadlock with
1989 	 * something like fallocate() that is holding the extent lock trying to
1990 	 * start a transaction while some other thread is trying to commit the
1991 	 * transaction while we (fsync) are currently holding the transaction
1992 	 * open.
1993 	 */
1994 	if (!full_sync) {
1995 		ret = btrfs_end_transaction(trans);
1996 		if (ret)
1997 			goto out;
1998 		ret = btrfs_wait_ordered_range(inode, start, len);
1999 		if (ret)
2000 			goto out;
2001 
2002 		/*
2003 		 * This is safe to use here because we're only interested in
2004 		 * making sure the transaction that had the ordered extents is
2005 		 * committed.  We aren't waiting on anything past this point,
2006 		 * committed.  We aren't waiting on anything past this point;
2007 		 */
2008 		trans = btrfs_attach_transaction_barrier(root);
2009 		if (IS_ERR(trans)) {
2010 			ret = PTR_ERR(trans);
2011 
2012 			/*
2013 			 * We committed the transaction and there's no currently
2014 			 * running transaction, which means everything we care
2015 			 * about made it to disk and we are done.
2016 			 */
2017 			if (ret == -ENOENT)
2018 				ret = 0;
2019 			goto out;
2020 		}
2021 	}
2022 
2023 	ret = btrfs_commit_transaction(trans);
2024 out:
2025 	ASSERT(list_empty(&ctx.list));
2026 	ASSERT(list_empty(&ctx.conflict_inodes));
2027 	err = file_check_and_advance_wb_err(file);
2028 	if (!ret)
2029 		ret = err;
2030 	return ret > 0 ? -EIO : ret;
2031 
2032 out_release_extents:
2033 	btrfs_release_log_ctx_extents(&ctx);
2034 	if (skip_ilock)
2035 		up_write(&BTRFS_I(inode)->i_mmap_lock);
2036 	else
2037 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2038 	goto out;
2039 }
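
/*
 * Illustrative sketch (userspace): both fsync(2) and fdatasync(2) end up
 * in btrfs_sync_file() above, with fdatasync() setting the datasync
 * argument:
 *
 *	write(fd, buf, len);
 *	fdatasync(fd);	// datasync == 1, may skip pure metadata updates
 */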
2040 
2041 static const struct vm_operations_struct btrfs_file_vm_ops = {
2042 	.fault		= filemap_fault,
2043 	.map_pages	= filemap_map_pages,
2044 	.page_mkwrite	= btrfs_page_mkwrite,
2045 };
2046 
2047 static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
2048 {
2049 	struct address_space *mapping = filp->f_mapping;
2050 
2051 	if (!mapping->a_ops->read_folio)
2052 		return -ENOEXEC;
2053 
2054 	file_accessed(filp);
2055 	vma->vm_ops = &btrfs_file_vm_ops;
2056 
2057 	return 0;
2058 }
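
/*
 * Illustrative sketch (userspace): a shared writable mapping exercises
 * the callbacks installed above; the first store into a clean page goes
 * through btrfs_page_mkwrite():
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 'x';	// fault -> filemap_fault(), then page_mkwrite
 */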
2059 
2060 static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf,
2061 			  int slot, u64 start, u64 end)
2062 {
2063 	struct btrfs_file_extent_item *fi;
2064 	struct btrfs_key key;
2065 
2066 	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
2067 		return 0;
2068 
2069 	btrfs_item_key_to_cpu(leaf, &key, slot);
2070 	if (key.objectid != btrfs_ino(inode) ||
2071 	    key.type != BTRFS_EXTENT_DATA_KEY)
2072 		return 0;
2073 
2074 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2075 
2076 	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
2077 		return 0;
2078 
2079 	if (btrfs_file_extent_disk_bytenr(leaf, fi))
2080 		return 0;
2081 
2082 	if (key.offset == end)
2083 		return 1;
2084 	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
2085 		return 1;
2086 	return 0;
2087 }
2088 
2089 static int fill_holes(struct btrfs_trans_handle *trans,
2090 		struct btrfs_inode *inode,
2091 		struct btrfs_path *path, u64 offset, u64 end)
2092 {
2093 	struct btrfs_fs_info *fs_info = trans->fs_info;
2094 	struct btrfs_root *root = inode->root;
2095 	struct extent_buffer *leaf;
2096 	struct btrfs_file_extent_item *fi;
2097 	struct extent_map *hole_em;
2098 	struct btrfs_key key;
2099 	int ret;
2100 
2101 	if (btrfs_fs_incompat(fs_info, NO_HOLES))
2102 		goto out;
2103 
2104 	key.objectid = btrfs_ino(inode);
2105 	key.type = BTRFS_EXTENT_DATA_KEY;
2106 	key.offset = offset;
2107 
2108 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2109 	if (ret <= 0) {
2110 		/*
2111 		 * We should have dropped this offset, so if we find it then
2112 		 * something has gone horribly wrong.
2113 		 */
2114 		if (ret == 0)
2115 			ret = -EINVAL;
2116 		return ret;
2117 	}
2118 
2119 	leaf = path->nodes[0];
2120 	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
2121 		u64 num_bytes;
2122 
2123 		path->slots[0]--;
2124 		fi = btrfs_item_ptr(leaf, path->slots[0],
2125 				    struct btrfs_file_extent_item);
2126 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
2127 			end - offset;
2128 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2129 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2130 		btrfs_set_file_extent_offset(leaf, fi, 0);
2131 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2132 		btrfs_mark_buffer_dirty(trans, leaf);
2133 		goto out;
2134 	}
2135 
2136 	if (hole_mergeable(inode, leaf, path->slots[0], offset, end)) {
2137 		u64 num_bytes;
2138 
2139 		key.offset = offset;
2140 		btrfs_set_item_key_safe(trans, path, &key);
2141 		fi = btrfs_item_ptr(leaf, path->slots[0],
2142 				    struct btrfs_file_extent_item);
2143 		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
2144 			offset;
2145 		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
2146 		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
2147 		btrfs_set_file_extent_offset(leaf, fi, 0);
2148 		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
2149 		btrfs_mark_buffer_dirty(trans, leaf);
2150 		goto out;
2151 	}
2152 	btrfs_release_path(path);
2153 
2154 	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset,
2155 				       end - offset);
2156 	if (ret)
2157 		return ret;
2158 
2159 out:
2160 	btrfs_release_path(path);
2161 
2162 	hole_em = alloc_extent_map();
2163 	if (!hole_em) {
2164 		btrfs_drop_extent_map_range(inode, offset, end - 1, false);
2165 		btrfs_set_inode_full_sync(inode);
2166 	} else {
2167 		hole_em->start = offset;
2168 		hole_em->len = end - offset;
2169 		hole_em->ram_bytes = hole_em->len;
2170 		hole_em->orig_start = offset;
2171 
2172 		hole_em->block_start = EXTENT_MAP_HOLE;
2173 		hole_em->block_len = 0;
2174 		hole_em->orig_block_len = 0;
2175 		hole_em->compress_type = BTRFS_COMPRESS_NONE;
2176 		hole_em->generation = trans->transid;
2177 
2178 		ret = btrfs_replace_extent_map_range(inode, hole_em, true);
2179 		free_extent_map(hole_em);
2180 		if (ret)
2181 			btrfs_set_inode_full_sync(inode);
2182 	}
2183 
2184 	return 0;
2185 }
2186 
2187 /*
2188  * Find a hole extent on the given inode and change start/len to the end of
2189  * the hole extent (a hole/vacuum extent is one whose em->start <= start &&
2190  *	   em->start + em->len > start).
2191  * When a hole extent is found, return 1 and modify start/len.
2192  */
2193 static int find_first_non_hole(struct btrfs_inode *inode, u64 *start, u64 *len)
2194 {
2195 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
2196 	struct extent_map *em;
2197 	int ret = 0;
2198 
2199 	em = btrfs_get_extent(inode, NULL, 0,
2200 			      round_down(*start, fs_info->sectorsize),
2201 			      round_up(*len, fs_info->sectorsize));
2202 	if (IS_ERR(em))
2203 		return PTR_ERR(em);
2204 
2205 	/* Hole or vacuum extent(only exists in no-hole mode) */
2206 	if (em->block_start == EXTENT_MAP_HOLE) {
2207 		ret = 1;
2208 		*len = em->start + em->len > *start + *len ?
2209 		       0 : *start + *len - em->start - em->len;
2210 		*start = em->start + em->len;
2211 	}
2212 	free_extent_map(em);
2213 	return ret;
2214 }
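
/*
 * Worked example for find_first_non_hole(): with a hole extent map
 * covering [0, 1M) and inputs *start == 4K, *len == 2M (range end at
 * 2M + 4K), it returns 1 and adjusts to *start == 1M, *len == 1M + 4K.
 * Had the hole covered the whole input range, *len would become 0.
 */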
2215 
2216 static void btrfs_punch_hole_lock_range(struct inode *inode,
2217 					const u64 lockstart,
2218 					const u64 lockend,
2219 					struct extent_state **cached_state)
2220 {
2221 	/*
2222 	 * For the subpage case, if the range is not at a page boundary, we could
2223 	 * have pages at the leading/trailing part of the range.
2224 	 * This could lead to an endless loop since filemap_range_has_page()
2225 	 * will always return true.
2226 	 * So here we need to do extra page alignment for
2227 	 * filemap_range_has_page().
2228 	 */
2229 	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
2230 	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
2231 
2232 	while (1) {
2233 		truncate_pagecache_range(inode, lockstart, lockend);
2234 
2235 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2236 			    cached_state);
2237 		/*
2238 		 * We can't have ordered extents in the range, nor dirty/writeback
2239 		 * pages, because we have locked the inode's VFS lock in exclusive
2240 		 * mode, we have locked the inode's i_mmap_lock in exclusive mode,
2241 		 * we have flushed all delalloc in the range and we have waited
2242 		 * for any ordered extents in the range to complete.
2243 		 * We can race with anyone reading pages from this range, so after
2244 		 * locking the range check if we have pages in the range, and if
2245 		 * we do, unlock the range and retry.
2246 		 */
2247 		if (!filemap_range_has_page(inode->i_mapping, page_lockstart,
2248 					    page_lockend))
2249 			break;
2250 
2251 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2252 			      cached_state);
2253 	}
2254 
2255 	btrfs_assert_inode_range_clean(BTRFS_I(inode), lockstart, lockend);
2256 }
2257 
2258 static int btrfs_insert_replace_extent(struct btrfs_trans_handle *trans,
2259 				     struct btrfs_inode *inode,
2260 				     struct btrfs_path *path,
2261 				     struct btrfs_replace_extent_info *extent_info,
2262 				     const u64 replace_len,
2263 				     const u64 bytes_to_drop)
2264 {
2265 	struct btrfs_fs_info *fs_info = trans->fs_info;
2266 	struct btrfs_root *root = inode->root;
2267 	struct btrfs_file_extent_item *extent;
2268 	struct extent_buffer *leaf;
2269 	struct btrfs_key key;
2270 	int slot;
2271 	struct btrfs_ref ref = { 0 };
2272 	int ret;
2273 
2274 	if (replace_len == 0)
2275 		return 0;
2276 
2277 	if (extent_info->disk_offset == 0 &&
2278 	    btrfs_fs_incompat(fs_info, NO_HOLES)) {
2279 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2280 		return 0;
2281 	}
2282 
2283 	key.objectid = btrfs_ino(inode);
2284 	key.type = BTRFS_EXTENT_DATA_KEY;
2285 	key.offset = extent_info->file_offset;
2286 	ret = btrfs_insert_empty_item(trans, root, path, &key,
2287 				      sizeof(struct btrfs_file_extent_item));
2288 	if (ret)
2289 		return ret;
2290 	leaf = path->nodes[0];
2291 	slot = path->slots[0];
2292 	write_extent_buffer(leaf, extent_info->extent_buf,
2293 			    btrfs_item_ptr_offset(leaf, slot),
2294 			    sizeof(struct btrfs_file_extent_item));
2295 	extent = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
2296 	ASSERT(btrfs_file_extent_type(leaf, extent) != BTRFS_FILE_EXTENT_INLINE);
2297 	btrfs_set_file_extent_offset(leaf, extent, extent_info->data_offset);
2298 	btrfs_set_file_extent_num_bytes(leaf, extent, replace_len);
2299 	if (extent_info->is_new_extent)
2300 		btrfs_set_file_extent_generation(leaf, extent, trans->transid);
2301 	btrfs_mark_buffer_dirty(trans, leaf);
2302 	btrfs_release_path(path);
2303 
2304 	ret = btrfs_inode_set_file_extent_range(inode, extent_info->file_offset,
2305 						replace_len);
2306 	if (ret)
2307 		return ret;
2308 
2309 	/* If it's a hole, nothing more needs to be done. */
2310 	if (extent_info->disk_offset == 0) {
2311 		btrfs_update_inode_bytes(inode, 0, bytes_to_drop);
2312 		return 0;
2313 	}
2314 
2315 	btrfs_update_inode_bytes(inode, replace_len, bytes_to_drop);
2316 
2317 	if (extent_info->is_new_extent && extent_info->insertions == 0) {
2318 		key.objectid = extent_info->disk_offset;
2319 		key.type = BTRFS_EXTENT_ITEM_KEY;
2320 		key.offset = extent_info->disk_len;
2321 		ret = btrfs_alloc_reserved_file_extent(trans, root,
2322 						       btrfs_ino(inode),
2323 						       extent_info->file_offset,
2324 						       extent_info->qgroup_reserved,
2325 						       &key);
2326 	} else {
2327 		u64 ref_offset;
2328 
2329 		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
2330 				       extent_info->disk_offset,
2331 				       extent_info->disk_len, 0);
2332 		ref_offset = extent_info->file_offset - extent_info->data_offset;
2333 		btrfs_init_data_ref(&ref, root->root_key.objectid,
2334 				    btrfs_ino(inode), ref_offset, 0, false);
2335 		ret = btrfs_inc_extent_ref(trans, &ref);
2336 	}
2337 
2338 	extent_info->insertions++;
2339 
2340 	return ret;
2341 }
2342 
2343 /*
2344  * The respective range must have been previously locked, as well as the inode.
2345  * The end offset is inclusive (last byte of the range).
2346  * @extent_info is NULL for fallocate's hole punching and non-NULL when replacing
2347  * the file range with an extent.
2348  * When not punching a hole, we don't want to end up in a state where we dropped
2349  * extents without inserting a new one, so we must abort the transaction to avoid
2350  * a corruption.
2351  */
2352 int btrfs_replace_file_extents(struct btrfs_inode *inode,
2353 			       struct btrfs_path *path, const u64 start,
2354 			       const u64 end,
2355 			       struct btrfs_replace_extent_info *extent_info,
2356 			       struct btrfs_trans_handle **trans_out)
2357 {
2358 	struct btrfs_drop_extents_args drop_args = { 0 };
2359 	struct btrfs_root *root = inode->root;
2360 	struct btrfs_fs_info *fs_info = root->fs_info;
2361 	u64 min_size = btrfs_calc_insert_metadata_size(fs_info, 1);
2362 	u64 ino_size = round_up(inode->vfs_inode.i_size, fs_info->sectorsize);
2363 	struct btrfs_trans_handle *trans = NULL;
2364 	struct btrfs_block_rsv *rsv;
2365 	unsigned int rsv_count;
2366 	u64 cur_offset;
2367 	u64 len = end - start;
2368 	int ret = 0;
2369 
2370 	if (end <= start)
2371 		return -EINVAL;
2372 
2373 	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
2374 	if (!rsv) {
2375 		ret = -ENOMEM;
2376 		goto out;
2377 	}
2378 	rsv->size = btrfs_calc_insert_metadata_size(fs_info, 1);
2379 	rsv->failfast = true;
2380 
2381 	/*
2382 	 * 1 - updating the inode
2383 	 * 1 - removing the extents in the range
2384 	 * 1 - adding the hole extent if no_holes isn't set or if we are
2385 	 *     replacing the range with a new extent
2386 	 */
2387 	if (!btrfs_fs_incompat(fs_info, NO_HOLES) || extent_info)
2388 		rsv_count = 3;
2389 	else
2390 		rsv_count = 2;
2391 
2392 	trans = btrfs_start_transaction(root, rsv_count);
2393 	if (IS_ERR(trans)) {
2394 		ret = PTR_ERR(trans);
2395 		trans = NULL;
2396 		goto out_free;
2397 	}
2398 
2399 	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
2400 				      min_size, false);
2401 	if (WARN_ON(ret))
2402 		goto out_trans;
2403 	trans->block_rsv = rsv;
2404 
2405 	cur_offset = start;
2406 	drop_args.path = path;
2407 	drop_args.end = end + 1;
2408 	drop_args.drop_cache = true;
2409 	while (cur_offset < end) {
2410 		drop_args.start = cur_offset;
2411 		ret = btrfs_drop_extents(trans, root, inode, &drop_args);
2412 		/* If we are punching a hole, decrement the inode's byte count */
2413 		if (!extent_info)
2414 			btrfs_update_inode_bytes(inode, 0,
2415 						 drop_args.bytes_found);
2416 		if (ret != -ENOSPC) {
2417 			/*
2418 			 * The only time we don't want to abort is if we are
2419 			 * attempting to clone a partial inline extent, in which
2420 			 * case we'll get EOPNOTSUPP.  However if we aren't
2421 			 * cloning we need to abort no matter what, because if we
2422 			 * got EOPNOTSUPP via prealloc then we messed up and
2423 			 * need to abort.
2424 			 */
2425 			if (ret &&
2426 			    (ret != -EOPNOTSUPP ||
2427 			     (extent_info && extent_info->is_new_extent)))
2428 				btrfs_abort_transaction(trans, ret);
2429 			break;
2430 		}
2431 
2432 		trans->block_rsv = &fs_info->trans_block_rsv;
2433 
2434 		if (!extent_info && cur_offset < drop_args.drop_end &&
2435 		    cur_offset < ino_size) {
2436 			ret = fill_holes(trans, inode, path, cur_offset,
2437 					 drop_args.drop_end);
2438 			if (ret) {
2439 				/*
2440 				 * If we failed then we didn't insert our hole
2441 				 * entries for the area we dropped, so the fs
2442 				 * is now corrupted and we must abort the
2443 				 * transaction.
2444 				 */
2445 				btrfs_abort_transaction(trans, ret);
2446 				break;
2447 			}
2448 		} else if (!extent_info && cur_offset < drop_args.drop_end) {
2449 			/*
2450 			 * We are past the i_size here, but since we didn't
2451 			 * insert holes we need to clear the mapped area so we
2452 			 * know to not set disk_i_size in this area until a new
2453 			 * file extent is inserted here.
2454 			 */
2455 			ret = btrfs_inode_clear_file_extent_range(inode,
2456 					cur_offset,
2457 					drop_args.drop_end - cur_offset);
2458 			if (ret) {
2459 				/*
2460 				 * We couldn't clear our area, so we could
2461 				 * presumably adjust up and corrupt the fs, and
2462 				 * thus we need to abort.
2463 				 */
2464 				btrfs_abort_transaction(trans, ret);
2465 				break;
2466 			}
2467 		}
2468 
2469 		if (extent_info &&
2470 		    drop_args.drop_end > extent_info->file_offset) {
2471 			u64 replace_len = drop_args.drop_end -
2472 					  extent_info->file_offset;
2473 
2474 			ret = btrfs_insert_replace_extent(trans, inode,	path,
2475 					extent_info, replace_len,
2476 					drop_args.bytes_found);
2477 			if (ret) {
2478 				btrfs_abort_transaction(trans, ret);
2479 				break;
2480 			}
2481 			extent_info->data_len -= replace_len;
2482 			extent_info->data_offset += replace_len;
2483 			extent_info->file_offset += replace_len;
2484 		}
2485 
2486 		/*
2487 		 * We are releasing our handle on the transaction, balance the
2488 		 * dirty pages of the btree inode and flush delayed items, and
2489 		 * then get a new transaction handle, which may now point to a
2490 		 * new transaction in case someone else committed the
2491 		 * transaction we used to replace/drop file extent items. So
2492 		 * bump the inode's iversion and update mtime and ctime except
2493 		 * if we are called from a dedupe context. This is because a
2494 		 * power failure/crash may happen after the transaction is
2495 		 * committed and before we finish replacing/dropping all the
2496 		 * file extent items we need.
2497 		 */
2498 		inode_inc_iversion(&inode->vfs_inode);
2499 
2500 		if (!extent_info || extent_info->update_times)
2501 			inode->vfs_inode.i_mtime = inode_set_ctime_current(&inode->vfs_inode);
2502 
2503 		ret = btrfs_update_inode(trans, root, inode);
2504 		if (ret)
2505 			break;
2506 
2507 		btrfs_end_transaction(trans);
2508 		btrfs_btree_balance_dirty(fs_info);
2509 
2510 		trans = btrfs_start_transaction(root, rsv_count);
2511 		if (IS_ERR(trans)) {
2512 			ret = PTR_ERR(trans);
2513 			trans = NULL;
2514 			break;
2515 		}
2516 
2517 		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
2518 					      rsv, min_size, false);
2519 		if (WARN_ON(ret))
2520 			break;
2521 		trans->block_rsv = rsv;
2522 
2523 		cur_offset = drop_args.drop_end;
2524 		len = end - cur_offset;
2525 		if (!extent_info && len) {
2526 			ret = find_first_non_hole(inode, &cur_offset, &len);
2527 			if (unlikely(ret < 0))
2528 				break;
2529 			if (ret && !len) {
2530 				ret = 0;
2531 				break;
2532 			}
2533 		}
2534 	}
2535 
2536 	/*
2537 	 * If we were cloning, force the next fsync to be a full one since we
2538 	 * replaced (or just dropped in the case of cloning holes when
2539 	 * NO_HOLES is enabled) file extent items and did not set up new extent
2540 	 * maps for the replacement extents (or holes).
2541 	 */
2542 	if (extent_info && !extent_info->is_new_extent)
2543 		btrfs_set_inode_full_sync(inode);
2544 
2545 	if (ret)
2546 		goto out_trans;
2547 
2548 	trans->block_rsv = &fs_info->trans_block_rsv;
2549 	/*
2550 	 * If we are using the NO_HOLES feature we might already have had a
2551 	 * hole that overlaps a part of the region [lockstart, lockend] and
2552 	 * ends at (or beyond) lockend. Since we have no file extent items to
2553 	 * represent holes, drop_end can be less than lockend and so we must
2554 	 * make sure we have an extent map representing the existing hole (the
2555 	 * call to __btrfs_drop_extents() might have dropped the existing extent
2556 	 * map representing the existing hole), otherwise the fast fsync path
2557 	 * will not record the existence of the hole region
2558 	 * [existing_hole_start, lockend].
2559 	 */
2560 	if (drop_args.drop_end <= end)
2561 		drop_args.drop_end = end + 1;
2562 	/*
2563 	 * Don't insert a file hole extent item if it's for a range beyond EOF
2564 	 * (because it's useless) or if it represents a zero-byte range (when
2565 	 * cur_offset == drop_end).
2566 	 */
2567 	if (!extent_info && cur_offset < ino_size &&
2568 	    cur_offset < drop_args.drop_end) {
2569 		ret = fill_holes(trans, inode, path, cur_offset,
2570 				 drop_args.drop_end);
2571 		if (ret) {
2572 			/* Same comment as above. */
2573 			btrfs_abort_transaction(trans, ret);
2574 			goto out_trans;
2575 		}
2576 	} else if (!extent_info && cur_offset < drop_args.drop_end) {
2577 		/* See the comment in the loop above for the reasoning here. */
2578 		ret = btrfs_inode_clear_file_extent_range(inode, cur_offset,
2579 					drop_args.drop_end - cur_offset);
2580 		if (ret) {
2581 			btrfs_abort_transaction(trans, ret);
2582 			goto out_trans;
2583 		}
2584 
2585 	}
2586 	if (extent_info) {
2587 		ret = btrfs_insert_replace_extent(trans, inode, path,
2588 				extent_info, extent_info->data_len,
2589 				drop_args.bytes_found);
2590 		if (ret) {
2591 			btrfs_abort_transaction(trans, ret);
2592 			goto out_trans;
2593 		}
2594 	}
2595 
2596 out_trans:
2597 	if (!trans)
2598 		goto out_free;
2599 
2600 	trans->block_rsv = &fs_info->trans_block_rsv;
2601 	if (ret)
2602 		btrfs_end_transaction(trans);
2603 	else
2604 		*trans_out = trans;
2605 out_free:
2606 	btrfs_free_block_rsv(fs_info, rsv);
2607 out:
2608 	return ret;
2609 }
2610 
2611 static int btrfs_punch_hole(struct file *file, loff_t offset, loff_t len)
2612 {
2613 	struct inode *inode = file_inode(file);
2614 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
2615 	struct btrfs_root *root = BTRFS_I(inode)->root;
2616 	struct extent_state *cached_state = NULL;
2617 	struct btrfs_path *path;
2618 	struct btrfs_trans_handle *trans = NULL;
2619 	u64 lockstart;
2620 	u64 lockend;
2621 	u64 tail_start;
2622 	u64 tail_len;
2623 	u64 orig_start = offset;
2624 	int ret = 0;
2625 	bool same_block;
2626 	u64 ino_size;
2627 	bool truncated_block = false;
2628 	bool updated_inode = false;
2629 
2630 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2631 
2632 	ret = btrfs_wait_ordered_range(inode, offset, len);
2633 	if (ret)
2634 		goto out_only_mutex;
2635 
2636 	ino_size = round_up(inode->i_size, fs_info->sectorsize);
2637 	ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2638 	if (ret < 0)
2639 		goto out_only_mutex;
2640 	if (ret && !len) {
2641 		/* Already in a large hole */
2642 		ret = 0;
2643 		goto out_only_mutex;
2644 	}
2645 
2646 	ret = file_modified(file);
2647 	if (ret)
2648 		goto out_only_mutex;
2649 
2650 	lockstart = round_up(offset, fs_info->sectorsize);
2651 	lockend = round_down(offset + len, fs_info->sectorsize) - 1;
2652 	same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
2653 		== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
2654 	/*
2655 	 * We needn't truncate any block which is beyond the end of the file
2656 	 * because we are sure there is no data there.
2657 	 */
2658 	/*
2659 	 * Only do this if we are in the same block and we aren't doing the
2660 	 * entire block.
2661 	 */
2662 	if (same_block && len < fs_info->sectorsize) {
2663 		if (offset < ino_size) {
2664 			truncated_block = true;
2665 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2666 						   0);
2667 		} else {
2668 			ret = 0;
2669 		}
2670 		goto out_only_mutex;
2671 	}
2672 
2673 	/* zero back part of the first block */
2674 	if (offset < ino_size) {
2675 		truncated_block = true;
2676 		ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2677 		if (ret) {
2678 			btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2679 			return ret;
2680 		}
2681 	}
2682 
2683 	/* Check the aligned pages after the first unaligned page.
2684 	 * If offset != orig_start, the first unaligned page and several
2685 	 * following pages are already inside holes, so the extra check
2686 	 * can be skipped. */
2687 	if (offset == orig_start) {
2688 		/* After truncating the block, check for holes again. */
2689 		len = offset + len - lockstart;
2690 		offset = lockstart;
2691 		ret = find_first_non_hole(BTRFS_I(inode), &offset, &len);
2692 		if (ret < 0)
2693 			goto out_only_mutex;
2694 		if (ret && !len) {
2695 			ret = 0;
2696 			goto out_only_mutex;
2697 		}
2698 		lockstart = offset;
2699 	}
2700 
2701 	/* Check whether the tail unaligned part is in a hole */
2702 	tail_start = lockend + 1;
2703 	tail_len = offset + len - tail_start;
2704 	if (tail_len) {
2705 		ret = find_first_non_hole(BTRFS_I(inode), &tail_start, &tail_len);
2706 		if (unlikely(ret < 0))
2707 			goto out_only_mutex;
2708 		if (!ret) {
2709 			/* zero the front end of the last page */
2710 			if (tail_start + tail_len < ino_size) {
2711 				truncated_block = true;
2712 				ret = btrfs_truncate_block(BTRFS_I(inode),
2713 							tail_start + tail_len,
2714 							0, 1);
2715 				if (ret)
2716 					goto out_only_mutex;
2717 			}
2718 		}
2719 	}
2720 
2721 	if (lockend < lockstart) {
2722 		ret = 0;
2723 		goto out_only_mutex;
2724 	}
2725 
2726 	btrfs_punch_hole_lock_range(inode, lockstart, lockend, &cached_state);
2727 
2728 	path = btrfs_alloc_path();
2729 	if (!path) {
2730 		ret = -ENOMEM;
2731 		goto out;
2732 	}
2733 
2734 	ret = btrfs_replace_file_extents(BTRFS_I(inode), path, lockstart,
2735 					 lockend, NULL, &trans);
2736 	btrfs_free_path(path);
2737 	if (ret)
2738 		goto out;
2739 
2740 	ASSERT(trans != NULL);
2741 	inode_inc_iversion(inode);
2742 	inode->i_mtime = inode_set_ctime_current(inode);
2743 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2744 	updated_inode = true;
2745 	btrfs_end_transaction(trans);
2746 	btrfs_btree_balance_dirty(fs_info);
2747 out:
2748 	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
2749 		      &cached_state);
2750 out_only_mutex:
2751 	if (!updated_inode && truncated_block && !ret) {
2752 		/*
2753 		 * If we only end up zeroing part of a page, we still need to
2754 		 * update the inode item, so that all the time fields are
2755 		 * updated as well as the necessary in-memory btrfs inode fields
2756 		 * for detecting, at fsync time, if the inode isn't yet in the
2757 		 * log tree or it's there but not up to date.
2758 		 */
2759 		struct timespec64 now = inode_set_ctime_current(inode);
2760 
2761 		inode_inc_iversion(inode);
2762 		inode->i_mtime = now;
2763 		trans = btrfs_start_transaction(root, 1);
2764 		if (IS_ERR(trans)) {
2765 			ret = PTR_ERR(trans);
2766 		} else {
2767 			int ret2;
2768 
2769 			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2770 			ret2 = btrfs_end_transaction(trans);
2771 			if (!ret)
2772 				ret = ret2;
2773 		}
2774 	}
2775 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
2776 	return ret;
2777 }
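
/*
 * Illustrative sketch (userspace): hole punching is reached via
 * fallocate(2), and FALLOC_FL_PUNCH_HOLE must be combined with
 * FALLOC_FL_KEEP_SIZE:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 8192);	// punch 8K starting at offset 4K
 */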
2778 
2779 /* Helper structure to record which range is already reserved */
2780 struct falloc_range {
2781 	struct list_head list;
2782 	u64 start;
2783 	u64 len;
2784 };
2785 
2786 /*
2787  * Helper function to add falloc range
2788  *
2789  * The caller should have locked the larger extent range containing
2790  * [start, len).
2791  */
2792 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
2793 {
2794 	struct falloc_range *range = NULL;
2795 
2796 	if (!list_empty(head)) {
2797 		/*
2798 		 * As fallocate iterates in increasing file offset order, we
2799 		 * only need to check the last range.
2800 		 */
2801 		range = list_last_entry(head, struct falloc_range, list);
2802 		if (range->start + range->len == start) {
2803 			range->len += len;
2804 			return 0;
2805 		}
2806 	}
2807 
2808 	range = kmalloc(sizeof(*range), GFP_KERNEL);
2809 	if (!range)
2810 		return -ENOMEM;
2811 	range->start = start;
2812 	range->len = len;
2813 	list_add_tail(&range->list, head);
2814 	return 0;
2815 }
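
/*
 * Worked example for add_falloc_range(): adding start == 0/len == 4K and
 * then start == 4K/len == 4K leaves a single list entry covering
 * start == 0/len == 8K, while a non-contiguous start == 16K/len == 4K
 * addition gets its own entry.
 */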
2816 
2817 static int btrfs_fallocate_update_isize(struct inode *inode,
2818 					const u64 end,
2819 					const int mode)
2820 {
2821 	struct btrfs_trans_handle *trans;
2822 	struct btrfs_root *root = BTRFS_I(inode)->root;
2823 	int ret;
2824 	int ret2;
2825 
2826 	if (mode & FALLOC_FL_KEEP_SIZE || end <= i_size_read(inode))
2827 		return 0;
2828 
2829 	trans = btrfs_start_transaction(root, 1);
2830 	if (IS_ERR(trans))
2831 		return PTR_ERR(trans);
2832 
2833 	inode_set_ctime_current(inode);
2834 	i_size_write(inode, end);
2835 	btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
2836 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2837 	ret2 = btrfs_end_transaction(trans);
2838 
2839 	return ret ? ret : ret2;
2840 }
2841 
2842 enum {
2843 	RANGE_BOUNDARY_WRITTEN_EXTENT,
2844 	RANGE_BOUNDARY_PREALLOC_EXTENT,
2845 	RANGE_BOUNDARY_HOLE,
2846 };
2847 
2848 static int btrfs_zero_range_check_range_boundary(struct btrfs_inode *inode,
2849 						 u64 offset)
2850 {
2851 	const u64 sectorsize = inode->root->fs_info->sectorsize;
2852 	struct extent_map *em;
2853 	int ret;
2854 
2855 	offset = round_down(offset, sectorsize);
2856 	em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize);
2857 	if (IS_ERR(em))
2858 		return PTR_ERR(em);
2859 
2860 	if (em->block_start == EXTENT_MAP_HOLE)
2861 		ret = RANGE_BOUNDARY_HOLE;
2862 	else if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2863 		ret = RANGE_BOUNDARY_PREALLOC_EXTENT;
2864 	else
2865 		ret = RANGE_BOUNDARY_WRITTEN_EXTENT;
2866 
2867 	free_extent_map(em);
2868 	return ret;
2869 }
2870 
2871 static int btrfs_zero_range(struct inode *inode,
2872 			    loff_t offset,
2873 			    loff_t len,
2874 			    const int mode)
2875 {
2876 	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
2877 	struct extent_map *em;
2878 	struct extent_changeset *data_reserved = NULL;
2879 	int ret;
2880 	u64 alloc_hint = 0;
2881 	const u64 sectorsize = fs_info->sectorsize;
2882 	u64 alloc_start = round_down(offset, sectorsize);
2883 	u64 alloc_end = round_up(offset + len, sectorsize);
2884 	u64 bytes_to_reserve = 0;
2885 	bool space_reserved = false;
2886 
2887 	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2888 			      alloc_end - alloc_start);
2889 	if (IS_ERR(em)) {
2890 		ret = PTR_ERR(em);
2891 		goto out;
2892 	}
2893 
2894 	/*
2895 	 * Avoid hole punching and extent allocation for some cases. More cases
2896 	 * could be considered, but these are unlikely to be common and we keep things
2897 	 * as simple as possible for now. Also, intentionally, if the target
2898 	 * range contains one or more prealloc extents together with regular
2899 	 * extents and holes, we drop all the existing extents and allocate a
2900 	 * new prealloc extent, so that we get a larger contiguous disk extent.
2901 	 */
2902 	if (em->start <= alloc_start &&
2903 	    test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2904 		const u64 em_end = em->start + em->len;
2905 
2906 		if (em_end >= offset + len) {
2907 			/*
2908 			 * The whole range is already a prealloc extent,
2909 			 * do nothing except updating the inode's i_size if
2910 			 * needed.
2911 			 */
2912 			free_extent_map(em);
2913 			ret = btrfs_fallocate_update_isize(inode, offset + len,
2914 							   mode);
2915 			goto out;
2916 		}
2917 		/*
2918 		 * Part of the range is already a prealloc extent, so operate
2919 		 * only on the remaining part of the range.
2920 		 */
2921 		alloc_start = em_end;
2922 		ASSERT(IS_ALIGNED(alloc_start, sectorsize));
2923 		len = offset + len - alloc_start;
2924 		offset = alloc_start;
2925 		alloc_hint = em->block_start + em->len;
2926 	}
2927 	free_extent_map(em);
2928 
2929 	if (BTRFS_BYTES_TO_BLKS(fs_info, offset) ==
2930 	    BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1)) {
2931 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, alloc_start,
2932 				      sectorsize);
2933 		if (IS_ERR(em)) {
2934 			ret = PTR_ERR(em);
2935 			goto out;
2936 		}
2937 
2938 		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
2939 			free_extent_map(em);
2940 			ret = btrfs_fallocate_update_isize(inode, offset + len,
2941 							   mode);
2942 			goto out;
2943 		}
2944 		if (len < sectorsize && em->block_start != EXTENT_MAP_HOLE) {
2945 			free_extent_map(em);
2946 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, len,
2947 						   0);
2948 			if (!ret)
2949 				ret = btrfs_fallocate_update_isize(inode,
2950 								   offset + len,
2951 								   mode);
2952 			return ret;
2953 		}
2954 		free_extent_map(em);
2955 		alloc_start = round_down(offset, sectorsize);
2956 		alloc_end = alloc_start + sectorsize;
2957 		goto reserve_space;
2958 	}
2959 
2960 	alloc_start = round_up(offset, sectorsize);
2961 	alloc_end = round_down(offset + len, sectorsize);
2962 
2963 	/*
2964 	 * For unaligned ranges, check the pages at the boundaries; they might
2965 	 * map to an extent, in which case we need to partially zero them, or
2966 	 * they might map to a hole, in which case we need our allocation range
2967 	 * to cover them.
2968 	 */
2969 	if (!IS_ALIGNED(offset, sectorsize)) {
2970 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2971 							    offset);
2972 		if (ret < 0)
2973 			goto out;
2974 		if (ret == RANGE_BOUNDARY_HOLE) {
2975 			alloc_start = round_down(offset, sectorsize);
2976 			ret = 0;
2977 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2978 			ret = btrfs_truncate_block(BTRFS_I(inode), offset, 0, 0);
2979 			if (ret)
2980 				goto out;
2981 		} else {
2982 			ret = 0;
2983 		}
2984 	}
2985 
2986 	if (!IS_ALIGNED(offset + len, sectorsize)) {
2987 		ret = btrfs_zero_range_check_range_boundary(BTRFS_I(inode),
2988 							    offset + len);
2989 		if (ret < 0)
2990 			goto out;
2991 		if (ret == RANGE_BOUNDARY_HOLE) {
2992 			alloc_end = round_up(offset + len, sectorsize);
2993 			ret = 0;
2994 		} else if (ret == RANGE_BOUNDARY_WRITTEN_EXTENT) {
2995 			ret = btrfs_truncate_block(BTRFS_I(inode), offset + len,
2996 						   0, 1);
2997 			if (ret)
2998 				goto out;
2999 		} else {
3000 			ret = 0;
3001 		}
3002 	}
3003 
3004 reserve_space:
3005 	if (alloc_start < alloc_end) {
3006 		struct extent_state *cached_state = NULL;
3007 		const u64 lockstart = alloc_start;
3008 		const u64 lockend = alloc_end - 1;
3009 
3010 		bytes_to_reserve = alloc_end - alloc_start;
3011 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3012 						      bytes_to_reserve);
3013 		if (ret < 0)
3014 			goto out;
3015 		space_reserved = true;
3016 		btrfs_punch_hole_lock_range(inode, lockstart, lockend,
3017 					    &cached_state);
3018 		ret = btrfs_qgroup_reserve_data(BTRFS_I(inode), &data_reserved,
3019 						alloc_start, bytes_to_reserve);
3020 		if (ret) {
3021 			unlock_extent(&BTRFS_I(inode)->io_tree, lockstart,
3022 				      lockend, &cached_state);
3023 			goto out;
3024 		}
3025 		ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
3026 						alloc_end - alloc_start,
3027 						i_blocksize(inode),
3028 						offset + len, &alloc_hint);
3029 		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend,
3030 			      &cached_state);
3031 		/* btrfs_prealloc_file_range releases reserved space on error */
3032 		if (ret) {
3033 			space_reserved = false;
3034 			goto out;
3035 		}
3036 	}
3037 	ret = btrfs_fallocate_update_isize(inode, offset + len, mode);
3038  out:
3039 	if (ret && space_reserved)
3040 		btrfs_free_reserved_data_space(BTRFS_I(inode), data_reserved,
3041 					       alloc_start, bytes_to_reserve);
3042 	extent_changeset_free(data_reserved);
3043 
3044 	return ret;
3045 }
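
/*
 * Illustrative sketch (userspace): the zero-range path above is reached
 * via
 *
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, offset, len);
 *
 * optionally OR-ed with FALLOC_FL_KEEP_SIZE to avoid growing i_size
 * beyond offset + len.
 */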
3046 
3047 static long btrfs_fallocate(struct file *file, int mode,
3048 			    loff_t offset, loff_t len)
3049 {
3050 	struct inode *inode = file_inode(file);
3051 	struct extent_state *cached_state = NULL;
3052 	struct extent_changeset *data_reserved = NULL;
3053 	struct falloc_range *range;
3054 	struct falloc_range *tmp;
3055 	LIST_HEAD(reserve_list);
3056 	u64 cur_offset;
3057 	u64 last_byte;
3058 	u64 alloc_start;
3059 	u64 alloc_end;
3060 	u64 alloc_hint = 0;
3061 	u64 locked_end;
3062 	u64 actual_end = 0;
3063 	u64 data_space_needed = 0;
3064 	u64 data_space_reserved = 0;
3065 	u64 qgroup_reserved = 0;
3066 	struct extent_map *em;
3067 	int blocksize = BTRFS_I(inode)->root->fs_info->sectorsize;
3068 	int ret;
3069 
3070 	/* Do not allow fallocate in ZONED mode */
3071 	if (btrfs_is_zoned(btrfs_sb(inode->i_sb)))
3072 		return -EOPNOTSUPP;
3073 
3074 	alloc_start = round_down(offset, blocksize);
3075 	alloc_end = round_up(offset + len, blocksize);
3076 	cur_offset = alloc_start;
3077 
3078 	/* Make sure we aren't being given some crap mode */
3079 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
3080 		     FALLOC_FL_ZERO_RANGE))
3081 		return -EOPNOTSUPP;
3082 
3083 	if (mode & FALLOC_FL_PUNCH_HOLE)
3084 		return btrfs_punch_hole(file, offset, len);
3085 
3086 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3087 
3088 	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) {
3089 		ret = inode_newsize_ok(inode, offset + len);
3090 		if (ret)
3091 			goto out;
3092 	}
3093 
3094 	ret = file_modified(file);
3095 	if (ret)
3096 		goto out;
3097 
3098 	/*
3099 	 * TODO: Move these two operations after we have checked for
3100 	 * accurate reserved space, otherwise fallocate can still fail
3101 	 * but with the page truncated or the size expanded.
3102 	 *
3103 	 * But that's a minor problem and won't do much harm BTW.
3104 	 */
3105 	if (alloc_start > inode->i_size) {
3106 		ret = btrfs_cont_expand(BTRFS_I(inode), i_size_read(inode),
3107 					alloc_start);
3108 		if (ret)
3109 			goto out;
3110 	} else if (offset + len > inode->i_size) {
3111 		/*
3112 		 * If we are fallocating from the end of the file onward we
3113 		 * need to zero out the end of the block if i_size lands in the
3114 		 * middle of a block.
3115 		 */
3116 		ret = btrfs_truncate_block(BTRFS_I(inode), inode->i_size, 0, 0);
3117 		if (ret)
3118 			goto out;
3119 	}
3120 
3121 	/*
3122 	 * We have locked the inode at the VFS level (in exclusive mode) and we
3123 	 * have locked the i_mmap_lock lock (in exclusive mode). Now before
3124 	 * locking the file range, flush all delalloc in the range and wait for
3125 	 * all ordered extents in the range to complete. After this we can lock
3126 	 * the file range and, due to the previous locking we did, we know there
3127 	 * can't be more delalloc or ordered extents in the range.
3128 	 */
3129 	ret = btrfs_wait_ordered_range(inode, alloc_start,
3130 				       alloc_end - alloc_start);
3131 	if (ret)
3132 		goto out;
3133 
3134 	if (mode & FALLOC_FL_ZERO_RANGE) {
3135 		ret = btrfs_zero_range(inode, offset, len, mode);
3136 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3137 		return ret;
3138 	}
3139 
3140 	locked_end = alloc_end - 1;
3141 	lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3142 		    &cached_state);
3143 
3144 	btrfs_assert_inode_range_clean(BTRFS_I(inode), alloc_start, locked_end);
3145 
3146 	/* First, check if we exceed the qgroup limit */
3147 	while (cur_offset < alloc_end) {
3148 		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
3149 				      alloc_end - cur_offset);
3150 		if (IS_ERR(em)) {
3151 			ret = PTR_ERR(em);
3152 			break;
3153 		}
3154 		last_byte = min(extent_map_end(em), alloc_end);
3155 		actual_end = min_t(u64, extent_map_end(em), offset + len);
3156 		last_byte = ALIGN(last_byte, blocksize);
3157 		if (em->block_start == EXTENT_MAP_HOLE ||
3158 		    (cur_offset >= inode->i_size &&
3159 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
3160 			const u64 range_len = last_byte - cur_offset;
3161 
3162 			ret = add_falloc_range(&reserve_list, cur_offset, range_len);
3163 			if (ret < 0) {
3164 				free_extent_map(em);
3165 				break;
3166 			}
3167 			ret = btrfs_qgroup_reserve_data(BTRFS_I(inode),
3168 					&data_reserved, cur_offset, range_len);
3169 			if (ret < 0) {
3170 				free_extent_map(em);
3171 				break;
3172 			}
3173 			qgroup_reserved += range_len;
3174 			data_space_needed += range_len;
3175 		}
3176 		free_extent_map(em);
3177 		cur_offset = last_byte;
3178 	}
3179 
3180 	if (!ret && data_space_needed > 0) {
3181 		/*
3182 		 * We are safe to reserve space here as we can't have delalloc
3183 		 * in the range, see above.
3184 		 */
3185 		ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode),
3186 						      data_space_needed);
3187 		if (!ret)
3188 			data_space_reserved = data_space_needed;
3189 	}
3190 
3191 	/*
3192 	 * If ret is still 0, it means we're OK to fallocate.
3193 	 * Otherwise, just clean up the list and exit.
3194 	 */
3195 	list_for_each_entry_safe(range, tmp, &reserve_list, list) {
3196 		if (!ret) {
3197 			ret = btrfs_prealloc_file_range(inode, mode,
3198 					range->start,
3199 					range->len, i_blocksize(inode),
3200 					offset + len, &alloc_hint);
3201 			/*
3202 			 * btrfs_prealloc_file_range() releases space even
3203 			 * if it returns an error.
3204 			 */
3205 			data_space_reserved -= range->len;
3206 			qgroup_reserved -= range->len;
3207 		} else if (data_space_reserved > 0) {
3208 			btrfs_free_reserved_data_space(BTRFS_I(inode),
3209 					       data_reserved, range->start,
3210 					       range->len);
3211 			data_space_reserved -= range->len;
3212 			qgroup_reserved -= range->len;
3213 		} else if (qgroup_reserved > 0) {
3214 			btrfs_qgroup_free_data(BTRFS_I(inode), data_reserved,
3215 					       range->start, range->len, NULL);
3216 			qgroup_reserved -= range->len;
3217 		}
3218 		list_del(&range->list);
3219 		kfree(range);
3220 	}
3221 	if (ret < 0)
3222 		goto out_unlock;
3223 
3224 	/*
3225 	 * We didn't need to allocate any more space, but we still extended the
3226 	 * size of the file so we need to update i_size and the inode item.
3227 	 */
3228 	ret = btrfs_fallocate_update_isize(inode, actual_end, mode);
3229 out_unlock:
3230 	unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
3231 		      &cached_state);
3232 out:
3233 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_MMAP);
3234 	extent_changeset_free(data_reserved);
3235 	return ret;
3236 }
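
/*
 * Illustrative sketch (userspace): plain preallocation (mode == 0) runs
 * through the code above, creating PREALLOC extents and, if needed,
 * extending i_size:
 *
 *	fallocate(fd, 0, 0, 1 << 20);	// preallocate the first 1 MiB
 */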
3237 
3238 /*
3239  * Helper for btrfs_find_delalloc_in_range(). Find a subrange in a given range
3240  * that has unflushed and/or flushing delalloc. There might be other adjacent
3241  * subranges after the one it found, so btrfs_find_delalloc_in_range() keeps
3242  * looping while it gets adjacent subranges, merging them together.
3243  */
3244 static bool find_delalloc_subrange(struct btrfs_inode *inode, u64 start, u64 end,
3245 				   struct extent_state **cached_state,
3246 				   bool *search_io_tree,
3247 				   u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3248 {
3249 	u64 len = end + 1 - start;
3250 	u64 delalloc_len = 0;
3251 	struct btrfs_ordered_extent *oe;
3252 	u64 oe_start;
3253 	u64 oe_end;
3254 
3255 	/*
3256 	 * Search the io tree first for EXTENT_DELALLOC. If we find any, it
3257 	 * means we have delalloc (dirty pages) for which writeback has not
3258 	 * started yet.
3259 	 */
3260 	if (*search_io_tree) {
3261 		spin_lock(&inode->lock);
3262 		if (inode->delalloc_bytes > 0) {
3263 			spin_unlock(&inode->lock);
3264 			*delalloc_start_ret = start;
3265 			delalloc_len = count_range_bits(&inode->io_tree,
3266 							delalloc_start_ret, end,
3267 							len, EXTENT_DELALLOC, 1,
3268 							cached_state);
3269 		} else {
3270 			spin_unlock(&inode->lock);
3271 		}
3272 	}
3273 
3274 	if (delalloc_len > 0) {
3275 		/*
3276 		 * If delalloc was found then *delalloc_start_ret has a sector size
3277 		 * aligned value (rounded down).
3278 		 */
3279 		*delalloc_end_ret = *delalloc_start_ret + delalloc_len - 1;
3280 
3281 		if (*delalloc_start_ret == start) {
3282 			/* Delalloc for the whole range, nothing more to do. */
3283 			if (*delalloc_end_ret == end)
3284 				return true;
3285 			/* Else trim our search range for ordered extents. */
3286 			start = *delalloc_end_ret + 1;
3287 			len = end + 1 - start;
3288 		}
3289 	} else {
3290 		/* No delalloc, future calls don't need to search again. */
3291 		*search_io_tree = false;
3292 	}
3293 
3294 	/*
3295 	 * Now also check if there's any ordered extent in the range.
3296 	 * We do this because:
3297 	 *
3298 	 * 1) When delalloc is flushed, the file range is locked, we clear the
3299 	 *    EXTENT_DELALLOC bit from the io tree and create an extent map and
3300 	 *    an ordered extent for the write. So we might just have been called
3301 	 *    after delalloc is flushed and before the ordered extent completes
3302 	 *    and inserts the new file extent item in the subvolume's btree;
3303 	 *
3304 	 * 2) We may have an ordered extent created by flushing delalloc for a
3305 	 *    subrange that starts before the subrange we found marked with
3306 	 *    EXTENT_DELALLOC in the io tree.
3307 	 *
3308 	 * We could also use the extent map tree to find such delalloc that is
3309 	 * being flushed, but using the ordered extents tree is more efficient
3310 	 * because it's usually much smaller as ordered extents are removed from
3311  * the tree once they complete. With the extent maps, we may have them
3312 	 * in the extent map tree for a very long time, and they were either
3313 	 * created by previous writes or loaded by read operations.
3314 	 */
3315 	oe = btrfs_lookup_first_ordered_range(inode, start, len);
3316 	if (!oe)
3317 		return (delalloc_len > 0);
3318 
3319 	/* The ordered extent may span beyond our search range. */
3320 	oe_start = max(oe->file_offset, start);
3321 	oe_end = min(oe->file_offset + oe->num_bytes - 1, end);
3322 
3323 	btrfs_put_ordered_extent(oe);
3324 
3325 	/* Don't have unflushed delalloc, return the ordered extent range. */
3326 	if (delalloc_len == 0) {
3327 		*delalloc_start_ret = oe_start;
3328 		*delalloc_end_ret = oe_end;
3329 		return true;
3330 	}
3331 
3332 	/*
3333 	 * We have both unflushed delalloc (io_tree) and an ordered extent.
3334 	 * If the ranges are adjacent, return a combined range, otherwise
3335 	 * return the leftmost range.
3336 	 */
3337 	if (oe_start < *delalloc_start_ret) {
3338 		if (oe_end < *delalloc_start_ret)
3339 			*delalloc_end_ret = oe_end;
3340 		*delalloc_start_ret = oe_start;
3341 	} else if (*delalloc_end_ret + 1 == oe_start) {
3342 		*delalloc_end_ret = oe_end;
3343 	}
3344 
3345 	return true;
3346 }
3347 
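/*
 * Worked example for the merge logic at the end of find_delalloc_subrange()
 * (illustrative offsets, not from the sources): say the io tree has delalloc
 * for [128K, 192K - 1].
 *
 * - An ordered extent at [192K, 256K - 1] is adjacent after it, so the
 *   combined range [128K, 256K - 1] is returned.
 * - An ordered extent at [64K, 96K - 1] is not adjacent, so only the leftmost
 *   range [64K, 96K - 1] is returned, and the delalloc subrange is left for a
 *   subsequent search.
 */
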
3348 /*
3349  * Check if there's delalloc in a given range.
3350  *
3351  * @inode:               The inode.
3352  * @start:               The start offset of the range. It does not need to be
3353  *                       sector size aligned.
3354  * @end:                 The end offset (inclusive value) of the search range.
3355  *                       It does not need to be sector size aligned.
3356  * @cached_state:        Extent state record used for speeding up delalloc
3357  *                       searches in the inode's io_tree. Can be NULL.
3358  * @delalloc_start_ret:  Output argument, set to the start offset of the
3359  *                       subrange found with delalloc (may not be sector size
3360  *                       aligned).
3361  * @delalloc_end_ret:    Output argument, set to the end offset (inclusive value)
3362  *                       of the subrange found with delalloc.
3363  *
3364  * Returns true if a subrange with delalloc is found within the given range, and
3365  * if so it sets @delalloc_start_ret and @delalloc_end_ret with the start and
3366  * end offsets of the subrange.
3367  */
3368 bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
3369 				  struct extent_state **cached_state,
3370 				  u64 *delalloc_start_ret, u64 *delalloc_end_ret)
3371 {
3372 	u64 cur_offset = round_down(start, inode->root->fs_info->sectorsize);
3373 	u64 prev_delalloc_end = 0;
3374 	bool search_io_tree = true;
3375 	bool ret = false;
3376 
3377 	while (cur_offset <= end) {
3378 		u64 delalloc_start;
3379 		u64 delalloc_end;
3380 		bool delalloc;
3381 
3382 		delalloc = find_delalloc_subrange(inode, cur_offset, end,
3383 						  cached_state, &search_io_tree,
3384 						  &delalloc_start,
3385 						  &delalloc_end);
3386 		if (!delalloc)
3387 			break;
3388 
3389 		if (prev_delalloc_end == 0) {
3390 			/* First subrange found. */
3391 			*delalloc_start_ret = max(delalloc_start, start);
3392 			*delalloc_end_ret = delalloc_end;
3393 			ret = true;
3394 		} else if (delalloc_start == prev_delalloc_end + 1) {
3395 			/* Subrange adjacent to the previous one, merge them. */
3396 			*delalloc_end_ret = delalloc_end;
3397 		} else {
3398 			/* Subrange not adjacent to the previous one, exit. */
3399 			break;
3400 		}
3401 
3402 		prev_delalloc_end = delalloc_end;
3403 		cur_offset = delalloc_end + 1;
3404 		cond_resched();
3405 	}
3406 
3407 	return ret;
3408 }
3409 
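/*
 * A minimal usage sketch for btrfs_find_delalloc_in_range() (not part of the
 * kernel sources; the helper name is hypothetical): add up how many bytes of
 * a file range currently have delalloc, whether unflushed or being flushed.
 */
static inline u64 example_delalloc_bytes_in_range(struct btrfs_inode *inode,
						  u64 start, u64 end)
{
	struct extent_state *cached_state = NULL;
	u64 total = 0;
	u64 delalloc_start;
	u64 delalloc_end;

	/* Each call returns the leftmost delalloc subrange at or after start. */
	while (start <= end &&
	       btrfs_find_delalloc_in_range(inode, start, end, &cached_state,
					    &delalloc_start, &delalloc_end)) {
		total += delalloc_end + 1 - delalloc_start;
		start = delalloc_end + 1;
	}
	free_extent_state(cached_state);
	return total;
}
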
3410 /*
3411  * Check if there's data (delalloc) or a hole in a file range that corresponds
3412  * to a hole (or prealloc extent) found in the inode's subvolume btree.
3413  *
3414  * @inode:      The inode.
3415  * @whence:     Seek mode (SEEK_DATA or SEEK_HOLE).
3416  * @start:      Start offset of the hole region. It does not need to be sector
3417  *              size aligned.
3418  * @end:        End offset (inclusive value) of the hole region. It does not
3419  *              need to be sector size aligned.
3420  * @start_ret:  Return parameter, used to set the start of the subrange in the
3421  *              hole that matches the search criteria (seek mode), if such a
3422  *              subrange is found (return value of the function is true).
3423  *              The value returned here may not be sector size aligned.
3424  *
3425  * Returns true if a subrange matching the given seek mode is found, and if one
3426  * is found, it updates @start_ret with the start of the subrange.
3427  */
3428 static bool find_desired_extent_in_hole(struct btrfs_inode *inode, int whence,
3429 					struct extent_state **cached_state,
3430 					u64 start, u64 end, u64 *start_ret)
3431 {
3432 	u64 delalloc_start;
3433 	u64 delalloc_end;
3434 	bool delalloc;
3435 
3436 	delalloc = btrfs_find_delalloc_in_range(inode, start, end, cached_state,
3437 						&delalloc_start, &delalloc_end);
3438 	if (delalloc && whence == SEEK_DATA) {
3439 		*start_ret = delalloc_start;
3440 		return true;
3441 	}
3442 
3443 	if (delalloc && whence == SEEK_HOLE) {
3444 		/*
3445 		 * We found delalloc but it starts after our start offset. So we
3446 		 * have a hole between our start offset and the delalloc start.
3447 		 */
3448 		if (start < delalloc_start) {
3449 			*start_ret = start;
3450 			return true;
3451 		}
3452 		/*
3453 		 * Delalloc range starts at our start offset.
3454 		 * If the delalloc range's length is smaller than our range,
3455 		 * then it means we have a hole that starts where the delalloc
3456 		 * subrange ends.
3457 		 */
3458 		if (delalloc_end < end) {
3459 			*start_ret = delalloc_end + 1;
3460 			return true;
3461 		}
3462 
3463 		/* There's delalloc for the whole range. */
3464 		return false;
3465 	}
3466 
3467 	if (!delalloc && whence == SEEK_HOLE) {
3468 		*start_ret = start;
3469 		return true;
3470 	}
3471 
3472 	/*
3473 	 * No delalloc in the range and we are seeking for data. The caller has
3474 	 * to iterate to the next extent item in the subvolume btree.
3475 	 */
3476 	return false;
3477 }
3478 
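/*
 * Worked example for find_desired_extent_in_hole() (illustrative offsets
 * only): suppose the hole region is [0, 256K - 1] and there is delalloc for
 * [64K, 128K - 1].  Then:
 *
 *   SEEK_DATA from 0:    returns true, *start_ret = 64K (the delalloc start)
 *   SEEK_HOLE from 0:    returns true, *start_ret = 0 (hole before delalloc)
 *   SEEK_HOLE from 64K:  returns true, *start_ret = 128K (hole after it)
 *   SEEK_DATA from 128K: returns false (the caller moves on to the next
 *                        extent item in the subvolume btree)
 */
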
3479 static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
3480 {
3481 	struct btrfs_inode *inode = BTRFS_I(file->f_mapping->host);
3482 	struct btrfs_file_private *private;
3483 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
3484 	struct extent_state *cached_state = NULL;
3485 	struct extent_state **delalloc_cached_state;
3486 	const loff_t i_size = i_size_read(&inode->vfs_inode);
3487 	const u64 ino = btrfs_ino(inode);
3488 	struct btrfs_root *root = inode->root;
3489 	struct btrfs_path *path;
3490 	struct btrfs_key key;
3491 	u64 last_extent_end;
3492 	u64 lockstart;
3493 	u64 lockend;
3494 	u64 start;
3495 	int ret;
3496 	bool found = false;
3497 
3498 	if (i_size == 0 || offset >= i_size)
3499 		return -ENXIO;
3500 
3501 	/*
3502 	 * Quick path. If the inode has no prealloc extents and its number of
3503 	 * bytes used matches its i_size, then it cannot have holes.
3504 	 */
3505 	if (whence == SEEK_HOLE &&
3506 	    !(inode->flags & BTRFS_INODE_PREALLOC) &&
3507 	    inode_get_bytes(&inode->vfs_inode) == i_size)
3508 		return i_size;
3509 
3510 	spin_lock(&inode->lock);
3511 	private = file->private_data;
3512 	spin_unlock(&inode->lock);
3513 
3514 	if (private && private->owner_task != current) {
3515 		/*
3516 		 * Not allocated by us, don't use it, as its cached state is used
3517 		 * by the task that allocated it and we want neither to mess
3518 		 * with it nor to get incorrect results because it reflects an
3519 		 * invalid state for the current task.
3520 		 */
3521 		private = NULL;
3522 	} else if (!private) {
3523 		private = kzalloc(sizeof(*private), GFP_KERNEL);
3524 		/*
3525 		 * No worries if memory allocation failed.
3526 		 * The private structure is used only for speeding up multiple
3527 		 * lseek SEEK_HOLE/DATA calls to a file when there's delalloc,
3528 		 * so everything will still be correct.
3529 		 */
3530 		if (private) {
3531 			bool free = false;
3532 
3533 			private->owner_task = current;
3534 
3535 			spin_lock(&inode->lock);
3536 			if (file->private_data)
3537 				free = true;
3538 			else
3539 				file->private_data = private;
3540 			spin_unlock(&inode->lock);
3541 
3542 			if (free) {
3543 				kfree(private);
3544 				private = NULL;
3545 			}
3546 		}
3547 	}
3548 
3549 	if (private)
3550 		delalloc_cached_state = &private->llseek_cached_state;
3551 	else
3552 		delalloc_cached_state = NULL;
3553 
3554 	/*
3555 	 * offset can be negative, in which case we start finding DATA/HOLE from
3556 	 * the very start of the file.
3557 	 */
3558 	start = max_t(loff_t, 0, offset);
3559 
3560 	lockstart = round_down(start, fs_info->sectorsize);
3561 	lockend = round_up(i_size, fs_info->sectorsize);
3562 	if (lockend <= lockstart)
3563 		lockend = lockstart + fs_info->sectorsize;
3564 	lockend--;
3565 
3566 	path = btrfs_alloc_path();
3567 	if (!path)
3568 		return -ENOMEM;
3569 	path->reada = READA_FORWARD;
3570 
3571 	key.objectid = ino;
3572 	key.type = BTRFS_EXTENT_DATA_KEY;
3573 	key.offset = start;
3574 
3575 	last_extent_end = lockstart;
3576 
3577 	lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3578 
3579 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3580 	if (ret < 0) {
3581 		goto out;
3582 	} else if (ret > 0 && path->slots[0] > 0) {
3583 		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
3584 		if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
3585 			path->slots[0]--;
3586 	}
3587 
3588 	while (start < i_size) {
3589 		struct extent_buffer *leaf = path->nodes[0];
3590 		struct btrfs_file_extent_item *extent;
3591 		u64 extent_end;
3592 		u8 type;
3593 
3594 		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
3595 			ret = btrfs_next_leaf(root, path);
3596 			if (ret < 0)
3597 				goto out;
3598 			else if (ret > 0)
3599 				break;
3600 
3601 			leaf = path->nodes[0];
3602 		}
3603 
3604 		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3605 		if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
3606 			break;
3607 
3608 		extent_end = btrfs_file_extent_end(path);
3609 
3610 		/*
3611 		 * In the first iteration we may have a slot that points to an
3612 		 * extent that ends before our start offset, so skip it.
3613 		 */
3614 		if (extent_end <= start) {
3615 			path->slots[0]++;
3616 			continue;
3617 		}
3618 
3619 		/* We have an implicit hole, NO_HOLES feature is likely set. */
3620 		if (last_extent_end < key.offset) {
3621 			u64 search_start = last_extent_end;
3622 			u64 found_start;
3623 
3624 			/*
3625 			 * First iteration, @start matches @offset and it's
3626 			 * within the hole.
3627 			 */
3628 			if (start == offset)
3629 				search_start = offset;
3630 
3631 			found = find_desired_extent_in_hole(inode, whence,
3632 							    delalloc_cached_state,
3633 							    search_start,
3634 							    key.offset - 1,
3635 							    &found_start);
3636 			if (found) {
3637 				start = found_start;
3638 				break;
3639 			}
3640 			/*
3641 			 * Didn't find data or a hole (due to delalloc) in the
3642 			 * implicit hole range, so we need to analyze the extent.
3643 			 */
3644 		}
3645 
3646 		extent = btrfs_item_ptr(leaf, path->slots[0],
3647 					struct btrfs_file_extent_item);
3648 		type = btrfs_file_extent_type(leaf, extent);
3649 
3650 		/*
3651 		 * Can't access the extent's disk_bytenr field if this is an
3652 		 * inline extent, since that is where the inline extent's data
3653 		 * starts.
3654 		 */
3655 		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
3656 		    (type == BTRFS_FILE_EXTENT_REG &&
3657 		     btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
3658 			/*
3659 			 * Explicit hole or prealloc extent, search for delalloc.
3660 			 * A prealloc extent is treated like a hole.
3661 			 */
3662 			u64 search_start = key.offset;
3663 			u64 found_start;
3664 
3665 			/*
3666 			 * First iteration, @start matches @offset and it's
3667 			 * within the hole.
3668 			 */
3669 			if (start == offset)
3670 				search_start = offset;
3671 
3672 			found = find_desired_extent_in_hole(inode, whence,
3673 							    delalloc_cached_state,
3674 							    search_start,
3675 							    extent_end - 1,
3676 							    &found_start);
3677 			if (found) {
3678 				start = found_start;
3679 				break;
3680 			}
3681 			/*
3682 			 * Didn't find data or a hole (due to delalloc) in the
3683 			 * hole or prealloc extent range, so we need to analyze the
3684 			 * next extent item.
3684 			 * extent item.
3685 			 */
3686 		} else {
3687 			/*
3688 			 * Found a regular or inline extent.
3689 			 * If we are seeking for data, adjust the start offset
3690 			 * and stop, we're done.
3691 			 */
3692 			if (whence == SEEK_DATA) {
3693 				start = max_t(u64, key.offset, offset);
3694 				found = true;
3695 				break;
3696 			}
3697 			/*
3698 			 * Else, we are seeking for a hole, check the next file
3699 			 * extent item.
3700 			 */
3701 		}
3702 
3703 		start = extent_end;
3704 		last_extent_end = extent_end;
3705 		path->slots[0]++;
3706 		if (fatal_signal_pending(current)) {
3707 			ret = -EINTR;
3708 			goto out;
3709 		}
3710 		cond_resched();
3711 	}
3712 
3713 	/* We have an implicit hole from the last extent found up to i_size. */
3714 	if (!found && start < i_size) {
3715 		found = find_desired_extent_in_hole(inode, whence,
3716 						    delalloc_cached_state, start,
3717 						    i_size - 1, &start);
3718 		if (!found)
3719 			start = i_size;
3720 	}
3721 
3722 out:
3723 	unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
3724 	btrfs_free_path(path);
3725 
3726 	if (ret < 0)
3727 		return ret;
3728 
3729 	if (whence == SEEK_DATA && start >= i_size)
3730 		return -ENXIO;
3731 
3732 	return min_t(loff_t, start, i_size);
3733 }
3734 
3735 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
3736 {
3737 	struct inode *inode = file->f_mapping->host;
3738 
3739 	switch (whence) {
3740 	default:
3741 		return generic_file_llseek(file, offset, whence);
3742 	case SEEK_DATA:
3743 	case SEEK_HOLE:
3744 		btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3745 		offset = find_desired_extent(file, offset, whence);
3746 		btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3747 		break;
3748 	}
3749 
3750 	if (offset < 0)
3751 		return offset;
3752 
3753 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
3754 }
3755 
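/*
 * For context, the SEEK_DATA/SEEK_HOLE contract implemented above, as seen
 * from userspace (illustrative snippet, not kernel code):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	if (data == (off_t)-1 && errno == ENXIO) {
 *		// No data at or after offset 0 (empty file or offset >= EOF).
 *	}
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *	// Finds a hole at or before i_size, since i_size acts as an implicit
 *	// hole boundary.
 */
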
3756 static int btrfs_file_open(struct inode *inode, struct file *filp)
3757 {
3758 	int ret;
3759 
3760 	filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC |
3761 		        FMODE_CAN_ODIRECT;
3762 
3763 	ret = fsverity_file_open(inode, filp);
3764 	if (ret)
3765 		return ret;
3766 	return generic_file_open(inode, filp);
3767 }
3768 
3769 static int check_direct_read(struct btrfs_fs_info *fs_info,
3770 			     const struct iov_iter *iter, loff_t offset)
3771 {
3772 	int ret;
3773 	int i, seg;
3774 
3775 	ret = check_direct_IO(fs_info, iter, offset);
3776 	if (ret < 0)
3777 		return ret;
3778 
3779 	if (!iter_is_iovec(iter))
3780 		return 0;
3781 
3782 	for (seg = 0; seg < iter->nr_segs; seg++) {
3783 		for (i = seg + 1; i < iter->nr_segs; i++) {
3784 			const struct iovec *iov1 = iter_iov(iter) + seg;
3785 			const struct iovec *iov2 = iter_iov(iter) + i;
3786 
3787 			if (iov1->iov_base == iov2->iov_base)
3788 				return -EINVAL;
3789 		}
3790 	}
3791 	return 0;
3792 }
3793 
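/*
 * Example of what check_direct_read() rejects (illustrative, userspace view):
 * a vectored O_DIRECT read where two iovecs alias the same base address.
 *
 *	char buf[512];
 *	struct iovec iov[2] = {
 *		{ .iov_base = buf, .iov_len = 512 },
 *		{ .iov_base = buf, .iov_len = 512 },	// duplicate base address
 *	};
 *	readv(fd, iov, 2);	// fd opened with O_DIRECT
 *
 * In that case btrfs_direct_read() below returns 0 without reading anything,
 * and btrfs_file_read_iter() falls back to a buffered read.
 */
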
3794 static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
3795 {
3796 	struct inode *inode = file_inode(iocb->ki_filp);
3797 	size_t prev_left = 0;
3798 	ssize_t read = 0;
3799 	ssize_t ret;
3800 
3801 	if (fsverity_active(inode))
3802 		return 0;
3803 
3804 	if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
3805 		return 0;
3806 
3807 	btrfs_inode_lock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3808 again:
3809 	/*
3810 	 * This is similar to what we do for direct IO writes, see the comment
3811 	 * at btrfs_direct_write(), but here we also disable page faults at the
3812 	 * task level, not only at the iov_iter level. This is because when
3813 	 * reading from a hole or prealloc extent, iomap calls iov_iter_zero(),
3814 	 * which can still trigger page fault-ins despite having set ->nofault
3815 	 * to true on our 'to' iov_iter.
3816 	 *
3817 	 * The difference to direct IO writes is that we deadlock when trying
3818 	 * to lock the extent range in the inode's tree during the page reads
3819 	 * triggered by the fault-in (while for writes it is due to waiting for
3820 	 * our own ordered extent). This is because for direct IO reads,
3821 	 * btrfs_dio_iomap_begin() returns with the extent range locked, which
3822 	 * is only unlocked in the endio callback (end_bio_extent_readpage()).
3823 	 */
3824 	pagefault_disable();
3825 	to->nofault = true;
3826 	ret = btrfs_dio_read(iocb, to, read);
3827 	to->nofault = false;
3828 	pagefault_enable();
3829 
3830 	/* No increment (+=) because iomap returns a cumulative value. */
3831 	if (ret > 0)
3832 		read = ret;
3833 
3834 	if (iov_iter_count(to) > 0 && (ret == -EFAULT || ret > 0)) {
3835 		const size_t left = iov_iter_count(to);
3836 
3837 		if (left == prev_left) {
3838 			/*
3839 			 * We didn't make any progress since the last attempt,
3840 			 * fall back to a buffered read for the remainder of the
3841 			 * range. This is just to avoid any possibility of looping
3842 			 * for too long.
3843 			 */
3844 			ret = read;
3845 		} else {
3846 			/*
3847 			 * We made some progress since the last retry or this is
3848 			 * the first time we are retrying. Fault in as many pages
3849 			 * as possible and retry.
3850 			 */
3851 			fault_in_iov_iter_writeable(to, left);
3852 			prev_left = left;
3853 			goto again;
3854 		}
3855 	}
3856 	btrfs_inode_unlock(BTRFS_I(inode), BTRFS_ILOCK_SHARED);
3857 	return ret < 0 ? ret : read;
3858 }
3859 
3860 static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
3861 {
3862 	ssize_t ret = 0;
3863 
3864 	if (iocb->ki_flags & IOCB_DIRECT) {
3865 		ret = btrfs_direct_read(iocb, to);
3866 		if (ret < 0 || !iov_iter_count(to) ||
3867 		    iocb->ki_pos >= i_size_read(file_inode(iocb->ki_filp)))
3868 			return ret;
3869 	}
3870 
3871 	return filemap_read(iocb, to, ret);
3872 }
3873 
3874 const struct file_operations btrfs_file_operations = {
3875 	.llseek		= btrfs_file_llseek,
3876 	.read_iter      = btrfs_file_read_iter,
3877 	.splice_read	= filemap_splice_read,
3878 	.write_iter	= btrfs_file_write_iter,
3879 	.splice_write	= iter_file_splice_write,
3880 	.mmap		= btrfs_file_mmap,
3881 	.open		= btrfs_file_open,
3882 	.release	= btrfs_release_file,
3883 	.get_unmapped_area = thp_get_unmapped_area,
3884 	.fsync		= btrfs_sync_file,
3885 	.fallocate	= btrfs_fallocate,
3886 	.unlocked_ioctl	= btrfs_ioctl,
3887 #ifdef CONFIG_COMPAT
3888 	.compat_ioctl	= btrfs_compat_ioctl,
3889 #endif
3890 	.remap_file_range = btrfs_remap_file_range,
3891 };
3892 
3893 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
3894 {
3895 	int ret;
3896 
3897 	/*
3898 	 * So with compression we will find and lock a dirty page and clear the
3899 	 * first one as dirty, set up an async extent, and immediately return
3900 	 * with the entire range locked but with nobody actually marked with
3901 	 * writeback.  So we can't just filemap_write_and_wait_range() and
3902 	 * expect it to work since it will just kick off a thread to do the
3903 	 * actual work.  So we need to call filemap_fdatawrite_range _again_
3904 	 * since it will wait on the page lock, which won't be unlocked until
3905 	 * after the pages have been marked as writeback and so we're good to go
3906 	 * from there.  We have to do this otherwise we'll miss the ordered
3907 	 * extents and that results in badness.  Please Josef, do not think you
3908 	 * know better and pull this out at some point in the future, it is
3909 	 * right and you are wrong.
3910 	 */
3911 	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3912 	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
3913 			     &BTRFS_I(inode)->runtime_flags))
3914 		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
3915 
3916 	return ret;
3917 }
3918
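/*
 * A minimal sketch of how a caller might pair btrfs_fdatawrite_range() with a
 * wait, relying on the double-write trick above to make sure async
 * (compressed) extents were submitted.  The helper name is hypothetical and
 * this is illustrative only; real callers such as fsync also wait on ordered
 * extents.
 */
static inline int example_write_and_wait_range(struct inode *inode,
					       loff_t start, loff_t end)
{
	int ret;

	/* Start writeback, twice if async extents are involved. */
	ret = btrfs_fdatawrite_range(inode, start, end);
	if (ret)
		return ret;
	/* Wait for the pages to finish writeback. */
	return filemap_fdatawait_range(inode->i_mapping, start, end);
}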