// SPDX-License-Identifier: GPL-2.0
/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */

#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"

/*
 * Call fscrypt_decrypt_page() on each page of a read bio, reusing the
 * bio's fscrypt context.  This runs from a workqueue (see
 * fscrypt_decrypt_bio_pages() below) because decryption may sleep,
 * whereas bio completion handlers can run in atomic context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page->mapping->host, page,
				PAGE_SIZE, 0, page->index);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}

void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);

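/*
 * Usage sketch (illustrative only, not compiled): a filesystem's read
 * completion handler hands an encrypted bio off to
 * fscrypt_decrypt_bio_pages() instead of unlocking the pages itself,
 * in the style of ext4's mpage_end_io().  my_read_end_io() and
 * my_bio_encrypted() are hypothetical names, not real APIs.
 */
#if 0
static void my_read_end_io(struct bio *bio)
{
	if (my_bio_encrypted(bio)) {		/* assumed helper */
		if (bio->bi_status) {
			/* I/O failed: just free the saved fscrypt context */
			fscrypt_release_ctx(bio->bi_private);
		} else {
			/* defer decryption and page unlocking to the workqueue */
			fscrypt_decrypt_bio_pages(bio->bi_private, bio);
			return;
		}
	}
	/* ... unencrypted or failed read: update page flags, unlock ... */
	bio_put(bio);
}
#endif
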
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* Bounce pages are unmapped, so a mapped page is not a bounce page. */
	if ((*page)->mapping)
		return;

	/* This unmapped page is a bounce page; find its fscrypt context. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore the original (control) page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);

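/*
 * Usage sketch (illustrative only, not compiled): in a write completion
 * path, each bio page may be a bounce page holding ciphertext.  Pulling
 * it back swaps the original pagecache page into 'page' so that error
 * flags and end_page_writeback() land on the right page, in the style
 * of f2fs's f2fs_write_end_io().  my_write_end_io() is a hypothetical
 * name.
 */
#if 0
static void my_write_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		/* if 'page' is a bounce page, free it and get the original */
		fscrypt_pullback_bio_page(&page, true);

		if (bio->bi_status)
			SetPageError(page);
		end_page_writeback(page);
	}
	bio_put(bio);
}
#endif
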
/*
 * Encrypt and write zeroes to 'len' filesystem blocks of an encrypted
 * file, starting at logical block 'lblk' which maps to physical block
 * 'pblk'.  Each block is encrypted from ZERO_PAGE(0) into a bounce page
 * and written out with a synchronous one-page bio.
 */
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
					     ZERO_PAGE(0), ciphertext_page,
					     PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if (err == 0 && bio->bi_status)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);
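
/*
 * Usage sketch (illustrative only, not compiled): a filesystem cannot
 * zero blocks of an encrypted file with a plain block-layer zeroout,
 * since plaintext zeroes must be written as ciphertext.  The pattern
 * below follows ext4_issue_zeroout(); my_issue_zeroout() is a
 * hypothetical name, and IS_ENCRYPTED() is assumed to be available in
 * this tree.
 */
#if 0
static int my_issue_zeroout(struct inode *inode, pgoff_t lblk,
			    sector_t pblk, unsigned int len)
{
	/* encrypted file: write encrypted blocks of zeroes via fscrypt */
	if (IS_ENCRYPTED(inode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	/* unencrypted file: let the block layer zero the range */
	return sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
}
#endif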