// SPDX-License-Identifier: GPL-2.0
/*
 * Data verification functions, i.e. hooks for ->readahead()
 *
 * Copyright 2019 Google LLC
 */

#include "fsverity_private.h"

#include <crypto/hash.h>
#include <linux/bio.h>

static struct workqueue_struct *fsverity_read_workqueue;

/**
 * hash_at_level() - compute the location of the block's hash at the given level
 *
 * @params: (in) the Merkle tree parameters
 * @dindex: (in) the index of the data block being verified
 * @level: (in) the level of hash we want (0 is leaf level)
 * @hindex: (out) the index of the hash block containing the wanted hash
 * @hoffset: (out) the byte offset to the wanted hash within the hash block
 */
static void hash_at_level(const struct merkle_tree_params *params,
                          pgoff_t dindex, unsigned int level, pgoff_t *hindex,
                          unsigned int *hoffset)
{
        pgoff_t position;

        /* Offset of the hash within the level's region, in hashes */
        position = dindex >> (level * params->log_arity);

        /* Index of the hash block in the tree overall */
        *hindex = params->level_start[level] + (position >> params->log_arity);

        /* Offset of the wanted hash (in bytes) within the hash block */
        *hoffset = (position & ((1 << params->log_arity) - 1)) <<
                   params->log_digestsize;
}

static inline int cmp_hashes(const struct fsverity_info *vi,
                             const u8 *want_hash, const u8 *real_hash,
                             pgoff_t index, int level)
{
        const unsigned int hsize = vi->tree_params.digest_size;

        if (memcmp(want_hash, real_hash, hsize) == 0)
                return 0;

        fsverity_err(vi->inode,
                     "FILE CORRUPTED! index=%lu, level=%d, want_hash=%s:%*phN, real_hash=%s:%*phN",
                     index, level,
                     vi->tree_params.hash_alg->name, hsize, want_hash,
                     vi->tree_params.hash_alg->name, hsize, real_hash);
        return -EBADMSG;
}

/*
 * Verify a single data page against the file's Merkle tree.
 *
 * In principle, we need to verify the entire path to the root node. However,
 * for efficiency the filesystem may cache the hash pages. Therefore we need
 * only ascend the tree until an already-verified page is seen, as indicated by
 * the PageChecked bit being set; then verify the path to that page.
 *
 * This code currently only supports the case where the verity block size is
 * equal to PAGE_SIZE. Doing otherwise would be possible but tricky, since we
 * wouldn't be able to use the PageChecked bit.
 *
 * Note that multiple processes may race to verify a hash page and mark it
 * Checked, but it doesn't matter; the result will be the same either way.
 *
 * Return: true if the page is valid, else false.
 */
static bool verify_page(struct inode *inode, const struct fsverity_info *vi,
                        struct ahash_request *req, struct page *data_page,
                        unsigned long max_ra_pages)
{
        const struct merkle_tree_params *params = &vi->tree_params;
        const unsigned int hsize = params->digest_size;
        const pgoff_t index = data_page->index;
        int level;
        u8 _want_hash[FS_VERITY_MAX_DIGEST_SIZE];
        const u8 *want_hash;
        u8 real_hash[FS_VERITY_MAX_DIGEST_SIZE];
        struct page *hpages[FS_VERITY_MAX_LEVELS];
        unsigned int hoffsets[FS_VERITY_MAX_LEVELS];
        int err;

        if (WARN_ON_ONCE(!PageLocked(data_page) || PageUptodate(data_page)))
                return false;

        /*
         * Starting at the leaf level, ascend the tree saving hash pages along
         * the way until we find a verified hash page, indicated by PageChecked;
         * or until we reach the root.
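         *
         * To make the arithmetic concrete, here is an illustrative example
         * (the specific sizes are assumptions, not requirements of this
         * code): with 4K blocks and SHA-256, each hash block holds
         * 4096 / 32 = 128 hashes, so log_arity == 7. A 1 GiB file then has
         * 262144 data blocks, covered by 2048 level-0 hash blocks, 16
         * level-1 blocks, and one level-2 block whose hash is the root hash;
         * num_levels == 3, so this loop runs at most three times.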
         */
        for (level = 0; level < params->num_levels; level++) {
                pgoff_t hindex;
                unsigned int hoffset;
                struct page *hpage;

                hash_at_level(params, index, level, &hindex, &hoffset);

                hpage = inode->i_sb->s_vop->read_merkle_tree_page(inode, hindex,
                                level == 0 ? min(max_ra_pages,
                                                 params->tree_pages - hindex) : 0);
                if (IS_ERR(hpage)) {
                        err = PTR_ERR(hpage);
                        fsverity_err(inode,
                                     "Error %d reading Merkle tree page %lu",
                                     err, hindex);
                        goto out;
                }

                if (PageChecked(hpage)) {
                        /* Already verified; just grab the wanted hash. */
                        memcpy_from_page(_want_hash, hpage, hoffset, hsize);
                        want_hash = _want_hash;
                        put_page(hpage);
                        goto descend;
                }
                hpages[level] = hpage;
                hoffsets[level] = hoffset;
        }

        want_hash = vi->root_hash;
descend:
        /* Descend the tree verifying hash pages */
        for (; level > 0; level--) {
                struct page *hpage = hpages[level - 1];
                unsigned int hoffset = hoffsets[level - 1];

                err = fsverity_hash_page(params, inode, req, hpage, real_hash);
                if (err)
                        goto out;
                err = cmp_hashes(vi, want_hash, real_hash, index, level - 1);
                if (err)
                        goto out;
                SetPageChecked(hpage);
                memcpy_from_page(_want_hash, hpage, hoffset, hsize);
                want_hash = _want_hash;
                put_page(hpage);
        }

        /* Finally, verify the data page */
        err = fsverity_hash_page(params, inode, req, data_page, real_hash);
        if (err)
                goto out;
        err = cmp_hashes(vi, want_hash, real_hash, index, -1);
out:
        for (; level > 0; level--)
                put_page(hpages[level - 1]);

        return err == 0;
}

/**
 * fsverity_verify_page() - verify a data page
 * @page: the page to verify
 *
 * Verify a page that has just been read from a verity file. The page must be a
 * pagecache page that is still locked and not yet uptodate.
 *
 * Return: true if the page is valid, else false.
 */
bool fsverity_verify_page(struct page *page)
{
        struct inode *inode = page->mapping->host;
        const struct fsverity_info *vi = inode->i_verity_info;
        struct ahash_request *req;
        bool valid;

        /* This allocation never fails, since it's mempool-backed. */
        req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);

        valid = verify_page(inode, vi, req, page, 0);

        fsverity_free_hash_request(vi->tree_params.hash_alg, req);

        return valid;
}
EXPORT_SYMBOL_GPL(fsverity_verify_page);

#ifdef CONFIG_BLOCK
/**
 * fsverity_verify_bio() - verify a 'read' bio that has just completed
 * @bio: the bio to verify
 *
 * Verify a set of pages that have just been read from a verity file. The pages
 * must be pagecache pages that are still locked and not yet uptodate. If a
 * page fails verification, then bio->bi_status is set to an error status.
 *
 * This is a helper function for use by the ->readahead() method of filesystems
 * that issue bios to read data directly into the page cache. Filesystems that
 * populate the page cache without issuing bios (e.g. non block-based
 * filesystems) must instead call fsverity_verify_page() directly on each page.
 * All filesystems must also call fsverity_verify_page() on holes.
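 *
 * Verification hashes each page and may sleep, so this is not meant to be
 * called directly from a bio completion handler running in atomic context.
 * A hypothetical work handler (the "myfs" names below are illustrative only,
 * not part of this API) might look like:
 *
 *      static void myfs_verity_work(struct work_struct *work)
 *      {
 *              struct myfs_read_ctx *ctx =
 *                      container_of(work, struct myfs_read_ctx, work);
 *
 *              fsverity_verify_bio(ctx->bio);
 *              myfs_complete_read(ctx->bio);
 *      }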
 */
void fsverity_verify_bio(struct bio *bio)
{
        struct inode *inode = bio_first_page_all(bio)->mapping->host;
        const struct fsverity_info *vi = inode->i_verity_info;
        struct ahash_request *req;
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;
        unsigned long max_ra_pages = 0;

        /* This allocation never fails, since it's mempool-backed. */
        req = fsverity_alloc_hash_request(vi->tree_params.hash_alg, GFP_NOFS);

        if (bio->bi_opf & REQ_RAHEAD) {
                /*
                 * If this bio is for data readahead, then we also do readahead
                 * of the first (largest) level of the Merkle tree. Namely,
                 * when a Merkle tree page is read, we also try to piggy-back
                 * on some additional pages -- up to 1/4 the number of data
                 * pages.
                 *
                 * This improves sequential read performance, as it greatly
                 * reduces the number of I/O requests made to the Merkle tree.
                 */
                max_ra_pages = bio->bi_iter.bi_size >> (PAGE_SHIFT + 2);
        }

        bio_for_each_segment_all(bv, bio, iter_all) {
                if (!verify_page(inode, vi, req, bv->bv_page, max_ra_pages)) {
                        bio->bi_status = BLK_STS_IOERR;
                        break;
                }
        }

        fsverity_free_hash_request(vi->tree_params.hash_alg, req);
}
EXPORT_SYMBOL_GPL(fsverity_verify_bio);
#endif /* CONFIG_BLOCK */

/**
 * fsverity_enqueue_verify_work() - enqueue work on the fs-verity workqueue
 * @work: the work to enqueue
 *
 * Enqueue verification work for asynchronous processing.
 */
void fsverity_enqueue_verify_work(struct work_struct *work)
{
        queue_work(fsverity_read_workqueue, work);
}
EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);

int __init fsverity_init_workqueue(void)
{
        /*
         * Use an unbound workqueue to allow bios to be verified in parallel
         * even when they happen to complete on the same CPU. This sacrifices
         * locality, but it's worthwhile since hashing is CPU-intensive.
         *
         * Also use a high-priority workqueue to prioritize verification work,
         * which blocks reads from completing, over regular application tasks.
         */
        fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
                                                  WQ_UNBOUND | WQ_HIGHPRI,
                                                  num_online_cpus());
        if (!fsverity_read_workqueue)
                return -ENOMEM;
        return 0;
}

void __init fsverity_exit_workqueue(void)
{
        destroy_workqueue(fsverity_read_workqueue);
        fsverity_read_workqueue = NULL;
}
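
/*
 * Usage sketch (hypothetical; the "myfs" names are illustrative and not code
 * from this file): a bio-based filesystem's read completion handler runs in
 * atomic context, so it hands verification off to the workqueue set up above
 * and lets the work handler (such as the myfs_verity_work() sketch in the
 * fsverity_verify_bio() comment) call fsverity_verify_bio():
 *
 *      static void myfs_read_end_io(struct bio *bio)
 *      {
 *              struct myfs_read_ctx *ctx = bio->bi_private;
 *
 *              if (bio->bi_status || !fsverity_active(ctx->inode)) {
 *                      myfs_complete_read(bio);
 *                      return;
 *              }
 *              INIT_WORK(&ctx->work, myfs_verity_work);
 *              fsverity_enqueue_verify_work(&ctx->work);
 *      }
 */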