xref: /openbmc/linux/block/blk-lib.c (revision 4e5e4705)
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

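/*
 * Batch several bios behind a single completion: the submitter initialises
 * ->done to 1, increments it once per bio it sends, then drops its own
 * reference; whichever decrement reaches zero completes ->wait.  Any bio
 * error other than -EOPNOTSUPP clears BIO_UPTODATE in ->flags so the
 * caller can turn it into -EIO.
 */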
struct bio_batch {
	atomic_t		done;
	unsigned long		flags;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio, int err)
{
	struct bio_batch *bb = bio->bi_private;

	if (err && (err != -EOPNOTSUPP))
		clear_bit(BIO_UPTODATE, &bb->flags);
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int max_discard_sectors, granularity;
	int alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	/*
	 * Ensure that max_discard_sectors is of the proper
	 * granularity, so that requests stay aligned after a split.
	 */
	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors)) {
		/* Avoid infinite loop below. Being cautious never hurts. */
		return -EOPNOTSUPP;
	}

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		req_sects = min_t(sector_t, nr_sects, max_discard_sectors);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
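
		/*
		 * Worked example of the rounding above: with an 8-sector
		 * granularity, a 1-sector alignment offset and a chunk that
		 * would end at sector 29, the remainder 29 % 8 = 5 differs
		 * from the alignment, so the end is pulled back to
		 * ((29 - 1) rounded down to a multiple of 8) + 1 = 25.  The
		 * next chunk then starts at 25, which satisfies
		 * 25 % 8 == alignment.
		 */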

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);
	}
	blk_finish_plug(&plug);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -EIO;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
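
/*
 * Example (illustrative sketch, not part of the original file): how a
 * caller such as a filesystem's FITRIM path might use
 * blkdev_issue_discard().  The helper name and error handling are
 * hypothetical.
 */
#if 0
static int example_discard_range(struct block_device *bdev,
				 sector_t start, sector_t nr_sects)
{
	/* Plain (non-secure) discard; pass BLKDEV_DISCARD_SECURE to request
	 * a secure erase on devices that support it. */
	int err = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);

	if (err == -EOPNOTSUPP)
		pr_debug("discard not supported on this device\n");
	return err;
}
#endif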

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	max_write_same_sectors = q->limits.max_write_same_sectors;

	if (max_write_same_sectors == 0)
		return -EOPNOTSUPP;

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
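		/*
		 * A WRITE SAME bio carries a single logical block of payload
		 * in one bvec; the device replicates that block across the
		 * whole bi_size range set below.
		 */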
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		ret = -ENOTSUPP;

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
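
/*
 * Example (illustrative sketch, not part of the original file): zeroing a
 * range with WRITE SAME by pointing the payload at the kernel zero page,
 * which is exactly what blkdev_issue_zeroout() below tries first.  The
 * wrapper name is hypothetical.
 */
#if 0
static int example_write_same_zero(struct block_device *bdev,
				   sector_t start, sector_t nr_sects)
{
	return blkdev_issue_write_same(bdev, start, nr_sects, GFP_KERNEL,
				       ZERO_PAGE(0));
}
#endif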

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */

int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.flags = 1 << BIO_UPTODATE;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

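		/*
		 * Pack as many zero pages as possible into this bio:
		 * bio_add_page() returns the number of bytes actually added,
		 * so stop on a short add (the bio is full) and submit it.
		 */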
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
		ret = -EIO;

	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Zero-fill the block range, using WRITE SAME when the device supports
 *  it and falling back to explicitly written zero pages otherwise.
 */

int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask)
{
	if (bdev_write_same(bdev)) {
		unsigned char bdn[BDEVNAME_SIZE];

		if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
					     ZERO_PAGE(0)))
			return 0;

		bdevname(bdev, bdn);
		pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
	}

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
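
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that needs the range to read back as zeroes, as an ioctl handler for
 * something like BLKZEROOUT might.  The helper name is hypothetical.
 */
#if 0
static int example_zero_range(struct block_device *bdev,
			      sector_t start, sector_t nr_sects)
{
	/* Prefers WRITE SAME internally, falls back to writing zero pages. */
	int err = blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL);

	if (err)
		pr_warn("zeroout of %llu sectors failed: %d\n",
			(unsigned long long)nr_sects, err);
	return err;
}
#endif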