// SPDX-License-Identifier: GPL-2.0
/*
 * fs/ext4/verity.c: fs-verity support for ext4
 *
 * Copyright 2019 Google LLC
 */

/*
 * Implementation of fsverity_operations for ext4.
 *
 * ext4 stores the verity metadata (Merkle tree and fsverity_descriptor) past
 * the end of the file, starting at the first 64K boundary beyond i_size.  This
 * approach works because (a) verity files are readonly, and (b) pages fully
 * beyond i_size aren't visible to userspace but can be read/written internally
 * by ext4 with only some relatively small changes to ext4.  This approach
 * avoids having to depend on the EA_INODE feature and on re-architecting
 * ext4's xattr support to support paging multi-gigabyte xattrs into memory and
 * encrypting xattrs.  Note that the verity metadata *must* be encrypted when
 * the file is, since it contains hashes of the plaintext data.
 *
 * Using a 64K boundary rather than a 4K one keeps things ready for
 * architectures with 64K pages, and it doesn't necessarily waste space on-disk
 * since there can be a hole between i_size and the start of the Merkle tree.
 */
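
/*
 * For example (illustrative numbers only): a verity file with
 * i_size == 1000000 has its Merkle tree start at round_up(1000000, 65536)
 * == 1048576.  Bytes 1000000..1048575 are typically a hole, the Merkle tree
 * occupies bytes 1048576 onward, and the fsverity_descriptor follows the
 * tree at the next filesystem block boundary.
 */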

#include <linux/quotaops.h>

#include "ext4.h"
#include "ext4_extents.h"
#include "ext4_jbd2.h"

static inline loff_t ext4_verity_metadata_pos(const struct inode *inode)
{
	return round_up(inode->i_size, 65536);
}

/*
 * Read some verity metadata from the inode.  __vfs_read() can't be used because
 * we need to read beyond i_size.
 */
static int pagecache_read(struct inode *inode, void *buf, size_t count,
			  loff_t pos)
{
	while (count) {
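		/* Copy at most up to the end of the current page per iteration. */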
		size_t n = min_t(size_t, count,
				 PAGE_SIZE - offset_in_page(pos));
		struct page *page;
		void *addr;

		page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
					 NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		addr = kmap_atomic(page);
		memcpy(buf, addr + offset_in_page(pos), n);
		kunmap_atomic(addr);

		put_page(page);

		buf += n;
		pos += n;
		count -= n;
	}
	return 0;
}

/*
 * Write some verity metadata to the inode for FS_IOC_ENABLE_VERITY.
 * kernel_write() can't be used because the file descriptor is readonly.
 */
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
			   loff_t pos)
{
	if (pos + count > inode->i_sb->s_maxbytes)
		return -EFBIG;

	while (count) {
		size_t n = min_t(size_t, count,
				 PAGE_SIZE - offset_in_page(pos));
		struct page *page;
		void *fsdata;
		void *addr;
		int res;

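		/*
		 * Get a locked pagecache page for this range; ext4's
		 * ->write_begin allocates blocks as needed.
		 */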
		res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
					    &page, &fsdata);
		if (res)
			return res;

		addr = kmap_atomic(page);
		memcpy(addr + offset_in_page(pos), buf, n);
		kunmap_atomic(addr);

		res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
					  page, fsdata);
		if (res < 0)
			return res;
		if (res != n)
			return -EIO;

		buf += n;
		pos += n;
		count -= n;
	}
	return 0;
}

static int ext4_begin_enable_verity(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	const int credits = 2; /* superblock and inode for ext4_orphan_add() */
	handle_t *handle;
	int err;

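	/*
	 * Verity reads must go through the pagecache so the data can be
	 * checked against the Merkle tree, so verity can't be enabled on
	 * DAX files.
	 */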
	if (IS_DAX(inode) || ext4_test_inode_flag(inode, EXT4_INODE_DAX))
		return -EINVAL;

	if (ext4_verity_in_progress(inode))
		return -EBUSY;

	/*
	 * Since the file was opened readonly, we have to initialize the jbd
	 * inode and quotas here and not rely on ->open() doing it.  This must
	 * be done before evicting the inline data.
	 */

	err = ext4_inode_attach_jinode(inode);
	if (err)
		return err;

	err = dquot_initialize(inode);
	if (err)
		return err;

	err = ext4_convert_inline_data(inode);
	if (err)
		return err;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ext4_warning_inode(inode,
				   "verity is only allowed on extent-based files");
		return -EOPNOTSUPP;
	}

	/*
	 * ext4 uses the last allocated block to find the verity descriptor, so
	 * we must remove any other blocks past EOF which might confuse things.
	 */
	err = ext4_truncate(inode);
	if (err)
		return err;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

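	/*
	 * Put the inode on the orphan list.  If enabling verity is
	 * interrupted by a crash, orphan cleanup on the next mount will then
	 * truncate the partially written metadata beyond i_size.
	 */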
	err = ext4_orphan_add(handle, inode);
	if (err == 0)
		ext4_set_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);

	ext4_journal_stop(handle);
	return err;
}

/*
 * ext4 stores the verity descriptor beginning on the next filesystem block
 * boundary after the Merkle tree.  Then, the descriptor size is stored in the
 * last 4 bytes of the last allocated filesystem block --- which is either the
 * block in which the descriptor ends, or the next block after that if there
 * weren't at least 4 bytes remaining.
 *
 * We can't simply store the descriptor in an xattr because it *must* be
 * encrypted when ext4 encryption is used, but ext4 encryption doesn't encrypt
 * xattrs.  Also, if the descriptor includes a large signature blob it may be
 * too large to store in an xattr without the EA_INODE feature.
 */
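
/*
 * For example (illustrative numbers, assuming a 4096-byte block size): with
 * the Merkle tree starting at byte 65536 and merkle_tree_size == 12288, a
 * 167-byte descriptor is written at desc_pos == round_up(65536 + 12288, 4096)
 * == 77824 and ends at byte 77991.  desc_size_disk then goes at
 * round_up(77991 + 4, 4096) - 4 == 81916, i.e. the last 4 bytes of the block
 * containing the descriptor.
 */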
static int ext4_write_verity_descriptor(struct inode *inode, const void *desc,
					size_t desc_size, u64 merkle_tree_size)
{
	const u64 desc_pos = round_up(ext4_verity_metadata_pos(inode) +
				      merkle_tree_size, i_blocksize(inode));
	const u64 desc_end = desc_pos + desc_size;
	const __le32 desc_size_disk = cpu_to_le32(desc_size);
	const u64 desc_size_pos = round_up(desc_end + sizeof(desc_size_disk),
					   i_blocksize(inode)) -
				  sizeof(desc_size_disk);
	int err;

	err = pagecache_write(inode, desc, desc_size, desc_pos);
	if (err)
		return err;

	return pagecache_write(inode, &desc_size_disk, sizeof(desc_size_disk),
			       desc_size_pos);
}

static int ext4_end_enable_verity(struct file *filp, const void *desc,
				  size_t desc_size, u64 merkle_tree_size)
{
	struct inode *inode = file_inode(filp);
	const int credits = 2; /* superblock and inode for ext4_orphan_del() */
	handle_t *handle;
	int err = 0;
	int err2;

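	/*
	 * fs-verity calls this with desc == NULL if an error occurred while
	 * building the Merkle tree, in which case we only need to roll back.
	 */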
	if (desc != NULL) {
		/* Succeeded; write the verity descriptor. */
		err = ext4_write_verity_descriptor(inode, desc, desc_size,
						   merkle_tree_size);

		/* Write all pages before clearing VERITY_IN_PROGRESS. */
		if (!err)
			err = filemap_write_and_wait(inode->i_mapping);
	}

	/* If we failed, truncate anything we wrote past i_size. */
	if (desc == NULL || err)
		ext4_truncate(inode);

	/*
	 * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and
	 * deleting the inode from the orphan list, even if something failed.
	 * If everything succeeded, we'll also set the verity bit in the same
	 * transaction.
	 */

	ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);

	handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
	if (IS_ERR(handle)) {
		ext4_orphan_del(NULL, inode);
		return PTR_ERR(handle);
	}

	err2 = ext4_orphan_del(handle, inode);
	if (err2)
		goto out_stop;

	if (desc != NULL && !err) {
		struct ext4_iloc iloc;

		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err)
			goto out_stop;
		ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
		ext4_set_inode_flags(inode, false);
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	}
out_stop:
	ext4_journal_stop(handle);
	return err ?: err2;
}

static int ext4_get_verity_descriptor_location(struct inode *inode,
					       size_t *desc_size_ret,
					       u64 *desc_pos_ret)
{
	struct ext4_ext_path *path;
	struct ext4_extent *last_extent;
	u32 end_lblk;
	u64 desc_size_pos;
	__le32 desc_size_disk;
	u32 desc_size;
	u64 desc_pos;
	int err;

	/*
	 * Descriptor size is in last 4 bytes of last allocated block.
	 * See ext4_write_verity_descriptor().
	 */

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		EXT4_ERROR_INODE(inode, "verity file doesn't use extents");
		return -EFSCORRUPTED;
	}

	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);

	last_extent = path[path->p_depth].p_ext;
	if (!last_extent) {
		EXT4_ERROR_INODE(inode, "verity file has no extents");
		ext4_ext_drop_refs(path);
		kfree(path);
		return -EFSCORRUPTED;
	}

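	/*
	 * end_lblk is the logical block just past the last allocated block,
	 * so shifting it by i_blkbits gives the byte offset of the end of the
	 * allocated data; desc_size_disk sits in the 4 bytes just before it.
	 */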
	end_lblk = le32_to_cpu(last_extent->ee_block) +
		   ext4_ext_get_actual_len(last_extent);
	desc_size_pos = (u64)end_lblk << inode->i_blkbits;
	ext4_ext_drop_refs(path);
	kfree(path);

	if (desc_size_pos < sizeof(desc_size_disk))
		goto bad;
	desc_size_pos -= sizeof(desc_size_disk);

	err = pagecache_read(inode, &desc_size_disk, sizeof(desc_size_disk),
			     desc_size_pos);
	if (err)
		return err;
	desc_size = le32_to_cpu(desc_size_disk);

	/*
	 * The descriptor is stored just before the desc_size_disk, but starting
	 * on a filesystem block boundary.
	 */

	if (desc_size > INT_MAX || desc_size > desc_size_pos)
		goto bad;

	desc_pos = round_down(desc_size_pos - desc_size, i_blocksize(inode));
	if (desc_pos < ext4_verity_metadata_pos(inode))
		goto bad;

	*desc_size_ret = desc_size;
	*desc_pos_ret = desc_pos;
	return 0;

bad:
	EXT4_ERROR_INODE(inode, "verity file corrupted; can't find descriptor");
	return -EFSCORRUPTED;
}

static int ext4_get_verity_descriptor(struct inode *inode, void *buf,
				      size_t buf_size)
{
	size_t desc_size = 0;
	u64 desc_pos = 0;
	int err;

	err = ext4_get_verity_descriptor_location(inode, &desc_size, &desc_pos);
	if (err)
		return err;

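	/* A buf_size of 0 means the caller only wants the descriptor size. */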
	if (buf_size) {
		if (desc_size > buf_size)
			return -ERANGE;
		err = pagecache_read(inode, buf, desc_size, desc_pos);
		if (err)
			return err;
	}
	return desc_size;
}

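/*
 * Read a page of the Merkle tree.  The index passed in by fs-verity is
 * relative to the start of the Merkle tree, so it is first translated into an
 * absolute page index within the file; readahead of the following tree pages
 * is started when the page isn't already cached and up to date.
 */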
static struct page *ext4_read_merkle_tree_page(struct inode *inode,
					       pgoff_t index,
					       unsigned long num_ra_pages)
{
	struct page *page;

	index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;

	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
	if (!page || !PageUptodate(page)) {
		DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, index);

		if (page)
			put_page(page);
		else if (num_ra_pages > 1)
			page_cache_ra_unbounded(&ractl, num_ra_pages, 0);
		page = read_mapping_page(inode->i_mapping, index, NULL);
	}
	return page;
}

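/*
 * Write a block of the Merkle tree.  'index' is in units of Merkle tree
 * blocks of size (1 << log_blocksize), relative to the start of the tree.
 */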
static int ext4_write_merkle_tree_block(struct inode *inode, const void *buf,
					u64 index, int log_blocksize)
{
	loff_t pos = ext4_verity_metadata_pos(inode) + (index << log_blocksize);

	return pagecache_write(inode, buf, 1 << log_blocksize, pos);
}

const struct fsverity_operations ext4_verityops = {
	.begin_enable_verity	= ext4_begin_enable_verity,
	.end_enable_verity	= ext4_end_enable_verity,
	.get_verity_descriptor	= ext4_get_verity_descriptor,
	.read_merkle_tree_page	= ext4_read_merkle_tree_page,
	.write_merkle_tree_block = ext4_write_merkle_tree_block,
};