xref: /openbmc/linux/block/blk-lib.c (revision 0cabf991)
// SPDX-License-Identifier: GPL-2.0
/*
 * Generic helper functions for the block layer
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

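/*
 * blk_next_bio - allocate a new bio and chain the current one to it
 *
 * Allocates a fresh bio with @nr_pages bvec slots. If @bio is non-NULL it
 * is chained to the new bio and submitted, so the completion of the last
 * bio in the series covers the whole chain. Callers typically start from
 * a NULL anchor and keep reassigning the return value, along the lines of
 * this illustrative sketch:
 *
 *	struct bio *bio = NULL;
 *
 *	while (work_left)
 *		bio = blk_next_bio(bio, 0, gfp_mask);
 *
 * (The loop body would also fill in the bio before the next iteration.)
 */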
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

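	/*
	 * bs_mask is the logical block size expressed in 512B sectors,
	 * minus one; ranges not aligned to the logical block size are
	 * rejected.
	 */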
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	/* In case the discard request is issued against a partition */
	if (bdev->bd_partno)
		part_offset = bdev->bd_part->start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
				q->limits.discard_granularity >> SECTOR_SHIFT);

		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA:
		 * - If not: use (granularity_aligned_lba - sector_mapped) as
		 *   the bi_size of the first split bio, so that the second bio
		 *   starts at a discard_granularity aligned LBA on the device.
		 * - If so: use bio_aligned_discard_max_sectors() as the maximum
		 *   possible bi_size of the first split bio. Then when this bio
		 *   is split in the device driver, the resulting bios are very
		 *   likely to be aligned to the discard_granularity of the
		 *   device's queue.
		 */
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid soft lockups if preemption
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
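
/*
 * A minimal usage sketch (illustrative only): __blkdev_issue_discard()
 * chains every bio onto a single anchor, so several ranges can be batched
 * under one plug and waited on with a single submit_bio_wait(). The
 * 'ranges', 'nr_ranges' and 'bdev' names below are hypothetical.
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int i, ret = 0;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_ranges && !ret; i++)
 *		ret = __blkdev_issue_discard(bdev, ranges[i].sector,
 *					     ranges[i].nr_sects, GFP_KERNEL,
 *					     0, &bio);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 */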

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
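
/*
 * For a single range the synchronous wrapper is enough. A hypothetical
 * caller discarding the first megabyte of a device might do:
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 *
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 *
 * (2048 sectors of 512 bytes == 1 MiB; 'bdev' is assumed to be held open.)
 */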

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) that all carry
 *  the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

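	/*
	 * Each bio carries exactly one logical block of payload; the
	 * device replicates it across bi_size worth of sectors.
	 */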
	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
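
/*
 * A hypothetical caller replicating one zeroed page across a range
 * (all names besides the exported API are illustrative):
 *
 *	int err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					  GFP_KERNEL, ZERO_PAGE(0));
 *
 * The device must advertise WRITE SAME support (bdev_write_same()),
 * otherwise -EOPNOTSUPP is returned.
 */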
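/*
 * __blkdev_issue_write_zeroes - generate bios for a hardware zeroing offload
 *
 * Splits the range into REQ_OP_WRITE_ZEROES bios no larger than the
 * device's advertised write-zeroes limit and chains them onto *biop.
 * With BLKDEV_ZERO_NOUNMAP the device is asked to keep the blocks
 * provisioned (REQ_NOUNMAP) rather than deallocate them.
 */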
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
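
/*
 * Worked example, assuming 4 KiB pages (8 sectors per page):
 *	nr_sects = 1    -> DIV_ROUND_UP(1, 8)    = 1 page
 *	nr_sects = 4096 -> DIV_ROUND_UP(4096, 8) = 512 pages, then clamped
 *	                   to BIO_MAX_PAGES (typically 256)
 */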
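/*
 * __blkdev_issue_zero_pages - write zeroes using regular WRITE bios
 *
 * Fallback for devices without a zeroing offload: build REQ_OP_WRITE
 * bios whose payload is the shared zero page repeated, and chain them
 * onto *biop.
 */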
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue the request against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

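	/*
	 * Try the WRITE ZEROES offload first; if it fails and the caller
	 * allows it, fall back to writing zero pages and retry once.
	 */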
retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
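
/*
 * A hypothetical caller zeroing a range, preferring the offload but
 * accepting the zero-page fallback (all names besides the exported API
 * are illustrative):
 *
 *	int err = blkdev_issue_zeroout(bdev, sector, nr_sects,
 *				       GFP_KERNEL, 0);
 *
 * Passing BLKDEV_ZERO_NOFALLBACK instead of 0 makes the call fail with
 * -EOPNOTSUPP rather than fall back to plain zero writes.
 */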