xref: /openbmc/linux/fs/btrfs/verity.c (revision 0030d7d6)
// SPDX-License-Identifier: GPL-2.0

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rwsem.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/posix_acl_xattr.h>
#include <linux/iversion.h>
#include <linux/fsverity.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "ctree.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "fs.h"
#include "accessors.h"
#include "ioctl.h"
#include "verity.h"
#include "orphan.h"

/*
 * Implementation of the interface defined in struct fsverity_operations.
 *
 * The main question is how and where to store the verity descriptor and the
 * Merkle tree. We store both in dedicated btree items in the filesystem tree,
 * together with the rest of the inode metadata. This means we'll need to do
 * extra work to encrypt them once encryption is supported in btrfs, but btrfs
 * has a lot of careful code around i_size and it seems better to make a new key
 * type than to try to adjust all of our expectations for i_size.
 *
 * Note that this differs from the implementation in ext4 and f2fs, where
 * this data is stored as if it were in the file, but past EOF. However, btrfs
 * does not have a widespread mechanism for caching opaque metadata pages, so we
 * do pretend that the Merkle tree pages themselves are past EOF for the
 * purposes of caching them (as opposed to creating a virtual inode).
 *
 * fs-verity items are stored under two different key types on disk.
 * The descriptor items:
 * [ inode objectid, BTRFS_VERITY_DESC_ITEM_KEY, offset ]
 *
 * At offset 0, we store a btrfs_verity_descriptor_item which tracks the
 * size of the descriptor item and some extra data for encryption.
 * Starting at offset 1, these hold the generic fs-verity descriptor.
 * The latter are opaque to btrfs; we just read and write them as a blob for
 * the higher level verity code.  The most common descriptor size is 256 bytes.
 *
 * The Merkle tree items:
 * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset ]
 *
 * These also start at offset 0, and correspond to the Merkle tree bytes.
 * So when fsverity asks for page 0 of the Merkle tree, we pull up one page
 * starting at offset 0 for this key type.  These are also opaque to btrfs;
 * we're blindly storing whatever fsverity sends down.
 *
 * Another important consideration is the fact that the Merkle tree data scales
 * linearly with the size of the file (with 4K pages/blocks and SHA-256, it's
 * ~1/127th the size) so for large files, writing the tree can be a lengthy
 * operation. For that reason, we guard the whole enable verity operation
 * (between begin_enable_verity and end_enable_verity) with an orphan item.
 * Again, because the data can be pretty large, it's quite possible that we
 * could run out of space writing it, so we try our best to handle errors by
 * stopping and rolling back rather than aborting the victim transaction.
 */

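/*
 * Purely illustrative example (the inode number and sizes are hypothetical):
 * a verity-enabled file at inode 257 with a 256 byte descriptor ends up with
 * items roughly like
 *
 *   (257, BTRFS_VERITY_DESC_ITEM_KEY, 0)       struct btrfs_verity_descriptor_item
 *   (257, BTRFS_VERITY_DESC_ITEM_KEY, 1)       256 bytes of fsverity descriptor
 *   (257, BTRFS_VERITY_MERKLE_ITEM_KEY, 0)     first 2K of Merkle tree data
 *   (257, BTRFS_VERITY_MERKLE_ITEM_KEY, 2048)  next 2K of Merkle tree data, ...
 */
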
#define MERKLE_START_ALIGN			65536

/*
 * Compute the logical file offset where we cache the Merkle tree.
 *
 * @inode:  inode of the verity file
 *
 * For the purposes of caching the Merkle tree pages, as required by
 * fs-verity, it is convenient to do size computations in terms of a file
 * offset, rather than in terms of page indices.
 *
 * Use 64K to be sure it's past the last page in the file, even with 64K pages.
 * That rounding operation itself can overflow loff_t, so we do it in u64 and
 * check.
 *
 * Returns the file offset on success, negative error code on failure.
 */
static loff_t merkle_file_pos(const struct inode *inode)
{
	u64 sz = inode->i_size;
	u64 rounded = round_up(sz, MERKLE_START_ALIGN);
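	/* e.g. a 100K file gets its Merkle tree cached starting at offset 128K. */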

	if (rounded > inode->i_sb->s_maxbytes)
		return -EFBIG;

	return rounded;
}

/*
 * Drop all the items for this inode with this key_type.
 *
 * @inode:     inode to drop items for
 * @key_type:  type of items to drop (BTRFS_VERITY_DESC_ITEM or
 *             BTRFS_VERITY_MERKLE_ITEM)
 *
 * Before doing a verity enable we clean up any existing verity items.
 * This is also used to clean up if a verity enable failed halfway through.
 *
 * Returns number of dropped items on success, negative error code on failure.
 */
static int drop_verity_items(struct btrfs_inode *inode, u8 key_type)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int count = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		/* 1 for the item being dropped */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}

		/*
		 * Walk backwards through all the items until we find one that
		 * isn't from our key type or objectid
		 */
		key.objectid = btrfs_ino(inode);
		key.type = key_type;
		key.offset = (u64)-1;

		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			ret = 0;
			/* No more keys of this type, we're done */
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			btrfs_end_transaction(trans);
			goto out;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

		/* No more keys of this type, we're done */
		if (key.objectid != btrfs_ino(inode) || key.type != key_type)
			break;

		/*
		 * This shouldn't be a performance sensitive function because
		 * it's not used as part of truncate.  If it ever becomes
		 * perf sensitive, change this to walk forward and bulk delete
		 * items
		 */
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
		if (ret) {
			btrfs_end_transaction(trans);
			goto out;
		}
		count++;
		btrfs_release_path(path);
		btrfs_end_transaction(trans);
	}
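	/* The break paths above exit the loop with the last transaction still open. */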
	ret = count;
	btrfs_end_transaction(trans);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Drop all verity items
 *
 * @inode:  inode to drop verity items for
 *
 * In most contexts where we are dropping verity items, we want to do it for all
 * the types of verity items, not a particular one.
 *
 * Returns: 0 on success, negative error code on failure.
 */
int btrfs_drop_verity_items(struct btrfs_inode *inode)
{
	int ret;

	ret = drop_verity_items(inode, BTRFS_VERITY_DESC_ITEM_KEY);
	if (ret < 0)
		return ret;
	ret = drop_verity_items(inode, BTRFS_VERITY_MERKLE_ITEM_KEY);
	if (ret < 0)
		return ret;

	return 0;
}

/*
 * Insert and write inode items with a given key type and offset.
 *
 * @inode:     inode to insert for
 * @key_type:  key type to insert
 * @offset:    item offset to insert at
 * @src:       source data to write
 * @len:       length of source data to write
 *
 * Write len bytes from src into items of up to 2K length.
 * The inserted items will have key (ino, key_type, offset + off) where off is
 * consecutively increasing from 0 up to the last item ending at offset + len.
 *
 * Returns 0 on success and a negative error code on failure.
 */
static int write_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
			   const char *src, u64 len)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long copy_bytes;
	unsigned long src_offset = 0;
	void *data;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

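	/*
	 * Each chunk below becomes its own item in its own transaction, so a
	 * crash partway through can leave partial items behind; the orphan
	 * added in btrfs_begin_enable_verity() is what lets those get cleaned
	 * up later.
	 */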
	while (len > 0) {
		/* 1 for the new item being inserted */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}

		key.objectid = btrfs_ino(inode);
		key.type = key_type;
		key.offset = offset;

		/*
		 * Insert 2K at a time mostly to be friendly for smaller leaf
		 * size filesystems
		 */
		copy_bytes = min_t(u64, len, 2048);

		ret = btrfs_insert_empty_item(trans, root, path, &key, copy_bytes);
		if (ret) {
			btrfs_end_transaction(trans);
			break;
		}

		leaf = path->nodes[0];

		data = btrfs_item_ptr(leaf, path->slots[0], void);
		write_extent_buffer(leaf, src + src_offset,
				    (unsigned long)data, copy_bytes);
		offset += copy_bytes;
		src_offset += copy_bytes;
		len -= copy_bytes;

		btrfs_release_path(path);
		btrfs_end_transaction(trans);
	}

	btrfs_free_path(path);
	return ret;
}

/*
 * Read inode items of the given key type and offset from the btree.
 *
 * @inode:      inode to read items of
 * @key_type:   key type to read
 * @offset:     item offset to read from
 * @dest:       Buffer to read into. This parameter has slightly tricky
 *              semantics.  If it is NULL, the function will not do any copying
 *              and will just return the size of all the items up to len bytes.
 *              If dest_page is passed, then the function will kmap_local the
 *              page and ignore dest, but it must still be non-NULL to avoid the
 *              counting-only behavior.
 * @len:        length in bytes to read
 * @dest_page:  copy into this page instead of the dest buffer
 *
 * Helper function to read items from the btree.  This returns the number of
 * bytes read or < 0 for errors.  We can return short reads if the items don't
 * exist on disk or aren't big enough to fill the desired length.  Supports
 * reading into a provided buffer (dest) or into the page cache (dest_page).
 *
 * Returns number of bytes read or a negative error code on failure.
 */
static int read_key_bytes(struct btrfs_inode *inode, u8 key_type, u64 offset,
			  char *dest, u64 len, struct page *dest_page)
{
	struct btrfs_path *path;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 item_end;
	u64 copy_end;
	int copied = 0;
	u32 copy_offset;
	unsigned long copy_bytes;
	unsigned long dest_offset = 0;
	void *data;
	char *kaddr = dest;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (dest_page)
		path->reada = READA_FORWARD;

	key.objectid = btrfs_ino(inode);
	key.type = key_type;
	key.offset = offset;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = 0;
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (len > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != btrfs_ino(inode) || key.type != key_type)
			break;

		item_end = btrfs_item_size(leaf, path->slots[0]) + key.offset;

		if (copied > 0) {
			/*
			 * Once we've copied something, we want all of the items
			 * to be sequential
			 */
			if (key.offset != offset)
				break;
		} else {
			/*
			 * Our initial offset might be in the middle of an
			 * item.  Make sure it all makes sense.
			 */
			if (key.offset > offset)
				break;
			if (item_end <= offset)
				break;
		}

		/* dest = NULL to just sum all the item lengths */
		if (!dest)
			copy_end = item_end;
		else
			copy_end = min(offset + len, item_end);

		/* Number of bytes in this item we want to copy */
		copy_bytes = copy_end - offset;

		/* Offset from the start of item for copying */
		copy_offset = offset - key.offset;

		if (dest) {
			if (dest_page)
				kaddr = kmap_local_page(dest_page);

			data = btrfs_item_ptr(leaf, path->slots[0], void);
			read_extent_buffer(leaf, kaddr + dest_offset,
					   (unsigned long)data + copy_offset,
					   copy_bytes);

			if (dest_page)
				kunmap_local(kaddr);
		}

		offset += copy_bytes;
		dest_offset += copy_bytes;
		len -= copy_bytes;
		copied += copy_bytes;

		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			/*
			 * We've reached the last slot in this leaf and we need
			 * to go to the next leaf.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				break;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
		}
	}
out:
	btrfs_free_path(path);
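	/* No error: report how many bytes we actually copied (possibly a short read). */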
	if (!ret)
		ret = copied;
	return ret;
}

/*
 * Delete an fsverity orphan
 *
 * @trans:  transaction to do the delete in
 * @inode:  inode to delete the orphan item for
 *
 * Capture verity orphan specific logic that is repeated in the couple of places
 * we delete verity orphans. Specifically, handling ENOENT and ignoring inodes
 * with 0 links.
 *
 * Returns zero on success or a negative error code on failure.
 */
static int del_orphan(struct btrfs_trans_handle *trans, struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	int ret;

	/*
	 * If the inode has no links, it is either already unlinked, or was
	 * created with O_TMPFILE. In either case, it should have an orphan from
	 * that other operation. Rather than reference count the orphans, we
	 * simply ignore them here, because we only invoke the verity path in
	 * the orphan logic when i_nlink is 1.
	 */
	if (!inode->vfs_inode.i_nlink)
		return 0;

	ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
	if (ret == -ENOENT)
		ret = 0;
	return ret;
}

/*
 * Rollback in-progress verity if we encounter an error.
 *
 * @inode:  inode verity had an error for
 *
 * We try to handle recoverable errors while enabling verity by rolling it back
 * and just failing the operation, rather than having an fs level error no
 * matter what. However, any error in rollback is unrecoverable.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int rollback_verity(struct btrfs_inode *inode)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = inode->root;
	int ret;

	ASSERT(inode_is_locked(&inode->vfs_inode));
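	/* Drop any Merkle tree pages we cached past EOF for this inode. */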
	truncate_inode_pages(inode->vfs_inode.i_mapping, inode->vfs_inode.i_size);
	clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
	ret = btrfs_drop_verity_items(inode);
	if (ret) {
		btrfs_handle_fs_error(root->fs_info, ret,
				"failed to drop verity items in rollback %llu",
				(u64)inode->vfs_inode.i_ino);
		goto out;
	}

	/*
	 * 1 for updating the inode flag
	 * 1 for deleting the orphan
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		btrfs_handle_fs_error(root->fs_info, ret,
			"failed to start transaction in verity rollback %llu",
			(u64)inode->vfs_inode.i_ino);
		goto out;
	}
	inode->ro_flags &= ~BTRFS_INODE_RO_VERITY;
	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	ret = del_orphan(trans, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
out:
	if (trans)
		btrfs_end_transaction(trans);
	return ret;
}

/*
 * Finalize making the file a valid verity file
 *
 * @inode:      inode to be marked as verity
 * @desc:       contents of the verity descriptor to write (not NULL)
 * @desc_size:  size of the verity descriptor
 *
 * Do the actual work of finalizing verity after successfully writing the Merkle
 * tree:
 *
 * - write out the descriptor items
 * - mark the inode with the verity flag
 * - delete the orphan item
 * - mark the ro compat bit
 * - clear the in progress bit
 *
 * Returns 0 on success, negative error code on failure.
 */
static int finish_verity(struct btrfs_inode *inode, const void *desc,
			 size_t desc_size)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_verity_descriptor_item item;
	int ret;

	/* Write out the descriptor item */
	memset(&item, 0, sizeof(item));
	btrfs_set_stack_verity_descriptor_size(&item, desc_size);
	ret = write_key_bytes(inode, BTRFS_VERITY_DESC_ITEM_KEY, 0,
			      (const char *)&item, sizeof(item));
	if (ret)
		goto out;

	/* Write out the descriptor itself */
	ret = write_key_bytes(inode, BTRFS_VERITY_DESC_ITEM_KEY, 1,
			      desc, desc_size);
	if (ret)
		goto out;

	/*
	 * 1 for updating the inode flag
	 * 1 for deleting the orphan
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}
	inode->ro_flags |= BTRFS_INODE_RO_VERITY;
	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		goto end_trans;
	ret = del_orphan(trans, inode);
	if (ret)
		goto end_trans;
	clear_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
	btrfs_set_fs_compat_ro(root->fs_info, VERITY);
end_trans:
	btrfs_end_transaction(trans);
out:
	return ret;
}

/*
 * fsverity op that begins enabling verity.
 *
 * @filp:  file to enable verity on
 *
 * Begin enabling fsverity for the file. We drop any existing verity items, add
 * an orphan and set the in progress bit.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int btrfs_begin_enable_verity(struct file *filp)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_trans_handle *trans;
	int ret;

	ASSERT(inode_is_locked(file_inode(filp)));

	if (test_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags))
		return -EBUSY;

	/*
	 * This should almost never do anything, but theoretically, it's
	 * possible that we failed to enable verity on a file, then were
	 * interrupted or failed while rolling back, failed to clean up the
	 * orphan, and finally attempt to enable verity again.
	 */
	ret = btrfs_drop_verity_items(inode);
	if (ret)
		return ret;

	/* 1 for the orphan item */
	trans = btrfs_start_transaction(root, 1);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_orphan_add(trans, inode);
	if (!ret)
		set_bit(BTRFS_INODE_VERITY_IN_PROGRESS, &inode->runtime_flags);
	btrfs_end_transaction(trans);

	return ret;
}

/*
 * fsverity op that ends enabling verity.
 *
 * @filp:              file we are finishing enabling verity on
 * @desc:              verity descriptor to write out (NULL in error conditions)
 * @desc_size:         size of the verity descriptor (variable with signatures)
 * @merkle_tree_size:  size of the Merkle tree in bytes
 *
 * If desc is NULL, then the VFS is signaling that an error occurred during
 * verity enable, and we should try to rollback. Otherwise, attempt to finish
 * verity.
 *
 * Returns 0 on success, negative error code on error.
 */
static int btrfs_end_enable_verity(struct file *filp, const void *desc,
				   size_t desc_size, u64 merkle_tree_size)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(filp));
	int ret = 0;
	int rollback_ret;

	ASSERT(inode_is_locked(file_inode(filp)));

	if (desc == NULL)
		goto rollback;

	ret = finish_verity(inode, desc, desc_size);
	if (ret)
		goto rollback;
	return ret;

rollback:
	rollback_ret = rollback_verity(inode);
	if (rollback_ret)
		btrfs_err(inode->root->fs_info,
			  "failed to rollback verity items: %d", rollback_ret);
	return ret;
}

/*
 * fsverity op that gets the struct fsverity_descriptor.
 *
 * @inode:     inode to get the descriptor of
 * @buf:       output buffer for the descriptor contents
 * @buf_size:  size of the output buffer. 0 to query the size
 *
 * fsverity does a two-pass setup for reading the descriptor: in the first pass
 * it calls with buf_size = 0 to query the size of the descriptor, and then in
 * the second pass it actually reads the descriptor off disk.
 *
 * Returns the size on success or a negative error code on failure.
 */
int btrfs_get_verity_descriptor(struct inode *inode, void *buf, size_t buf_size)
{
	u64 true_size;
	int ret = 0;
	struct btrfs_verity_descriptor_item item;

	memset(&item, 0, sizeof(item));
	ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_DESC_ITEM_KEY, 0,
			     (char *)&item, sizeof(item), NULL);
	if (ret < 0)
		return ret;

	if (item.reserved[0] != 0 || item.reserved[1] != 0)
		return -EUCLEAN;

	true_size = btrfs_stack_verity_descriptor_size(&item);
	if (true_size > INT_MAX)
		return -EUCLEAN;

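	/* First pass from fsverity: only the descriptor size was requested. */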
	if (buf_size == 0)
		return true_size;
	if (buf_size < true_size)
		return -ERANGE;

	ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_DESC_ITEM_KEY, 1,
			     buf, buf_size, NULL);
	if (ret < 0)
		return ret;
	if (ret != true_size)
		return -EIO;

	return true_size;
}

/*
 * fsverity op that reads and caches a Merkle tree page.
 *
 * @inode:         inode to read a Merkle tree page for
 * @index:         page index relative to the start of the Merkle tree
 * @num_ra_pages:  number of pages to readahead. Optional, we ignore it
 *
 * The Merkle tree is stored in the filesystem btree, but its pages are cached
 * with a logical position past EOF in the inode's mapping.
 *
 * Returns the page we read, or an ERR_PTR on error.
 */
static struct page *btrfs_read_merkle_tree_page(struct inode *inode,
						pgoff_t index,
						unsigned long num_ra_pages)
{
	struct page *page;
	u64 off = (u64)index << PAGE_SHIFT;
	loff_t merkle_pos = merkle_file_pos(inode);
	int ret;

	if (merkle_pos < 0)
		return ERR_PTR(merkle_pos);
	if (merkle_pos > inode->i_sb->s_maxbytes - off - PAGE_SIZE)
		return ERR_PTR(-EFBIG);
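	/* Shift the page index into the area past EOF where we cache the tree. */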
	index += merkle_pos >> PAGE_SHIFT;
again:
	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
	if (page) {
		if (PageUptodate(page))
			return page;

		lock_page(page);
		/*
		 * We only insert uptodate pages, so !Uptodate has to be
		 * an error
		 */
		if (!PageUptodate(page)) {
			unlock_page(page);
			put_page(page);
			return ERR_PTR(-EIO);
		}
		unlock_page(page);
		return page;
	}

	page = __page_cache_alloc(mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	if (!page)
		return ERR_PTR(-ENOMEM);

	/*
	 * Merkle item keys are indexed from byte 0 in the Merkle tree.
	 * They have the form:
	 *
	 * [ inode objectid, BTRFS_VERITY_MERKLE_ITEM_KEY, offset in bytes ]
	 */
	ret = read_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY, off,
			     page_address(page), PAGE_SIZE, page);
	if (ret < 0) {
		put_page(page);
		return ERR_PTR(ret);
	}
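	/* Zero the tail of the page if the items did not fill it completely. */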
	if (ret < PAGE_SIZE)
		memzero_page(page, ret, PAGE_SIZE - ret);

	SetPageUptodate(page);
	ret = add_to_page_cache_lru(page, inode->i_mapping, index, GFP_NOFS);

	if (!ret) {
		/* Inserted and ready for fsverity */
		unlock_page(page);
	} else {
		put_page(page);
		/* Did someone race us into inserting this page? */
		if (ret == -EEXIST)
			goto again;
		page = ERR_PTR(ret);
	}
	return page;
}

/*
 * fsverity op that writes a Merkle tree block into the btree.
 *
 * @inode:          inode to write a Merkle tree block for
 * @buf:            Merkle tree data block to write
 * @index:          index of the block in the Merkle tree
 * @log_blocksize:  log base 2 of the Merkle tree block size
 *
 * Note that the block size could be different from the page size, so it is not
 * safe to assume that index is a page index.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int btrfs_write_merkle_tree_block(struct inode *inode, const void *buf,
					u64 index, int log_blocksize)
{
	u64 off = index << log_blocksize;
	u64 len = 1ULL << log_blocksize;
	loff_t merkle_pos = merkle_file_pos(inode);

	if (merkle_pos < 0)
		return merkle_pos;
	if (merkle_pos > inode->i_sb->s_maxbytes - off - len)
		return -EFBIG;

	return write_key_bytes(BTRFS_I(inode), BTRFS_VERITY_MERKLE_ITEM_KEY,
			       off, buf, len);
}

const struct fsverity_operations btrfs_verityops = {
	.begin_enable_verity     = btrfs_begin_enable_verity,
	.end_enable_verity       = btrfs_end_enable_verity,
	.get_verity_descriptor   = btrfs_get_verity_descriptor,
	.read_merkle_tree_page   = btrfs_read_merkle_tree_page,
	.write_merkle_tree_block = btrfs_write_merkle_tree_block,
};