// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block layer helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

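/*
 * Allocate a new bio and, if @bio is non-NULL, chain it to the new one
 * and submit it.  Callers use this in a loop to split a large request
 * into a chain of bios, submitting each piece as the next is prepared.
 */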
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	while (nr_sects) {
		unsigned int req_sects = nr_sects;
		sector_t end_sect;

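		/*
		 * The assignment above truncates the 64-bit nr_sects to
		 * 32 bits.  Fail if it truncated to zero, and clamp the
		 * chunk so that bi_size (an unsigned int byte count)
		 * cannot overflow.
		 */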
		if (!req_sects)
			goto fail;
		if (req_sects > UINT_MAX >> 9)
			req_sects = UINT_MAX >> 9;

		end_sect = sector + req_sects;

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;

fail:
	if (bio) {
		submit_bio_wait(bio);
		bio_put(bio);
	}
	*biop = NULL;
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
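
/*
 * Example (illustrative sketch, not part of this file): a filesystem
 * discarding a 1 MiB extent from process context.  GFP_KERNEL is
 * appropriate because submit_bio_wait() sleeps; the hypothetical
 * start_sector must be aligned to the logical block size.
 *
 *	int err = blkdev_issue_discard(bdev, start_sector, 2048,
 *				       GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 */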

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) that all
 *  replicate the same page of data.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
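
/*
 * Example (illustrative sketch): zeroing a range by replicating the
 * shared zero page, assuming the device advertises WRITE SAME support
 * (i.e. bdev_write_same() returns a non-zero limit).
 *
 *	err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *				      GFP_KERNEL, ZERO_PAGE(0));
 */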

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
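		/*
		 * BLKDEV_ZERO_NOUNMAP asks the device to keep the range
		 * fully provisioned: write zeroes without deallocating
		 * (unmapping) the underlying blocks.
		 */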
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
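
/*
 * Worked example, assuming 4 KiB pages (8 sectors per page):
 * nr_sects = 7    -> DIV_ROUND_UP(7, 8)    = 1 page
 * nr_sects = 4096 -> DIV_ROUND_UP(4096, 8) = 512, clamped to BIO_MAX_PAGES
 */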

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

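		/*
		 * bio_add_page() returns the number of bytes it actually
		 * added.  A short return means this bio is full, so break
		 * out and start a fresh one.
		 */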
		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
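
/*
 * Example (illustrative sketch): chaining several ranges into a single
 * submission, which is what the *biop anchor interface is for.  The
 * ranges s1/n1 and s2/n2 are hypothetical.
 *
 *	struct bio *bio = NULL;
 *	int err;
 *
 *	err = __blkdev_issue_zeroout(bdev, s1, n1, GFP_NOFS, &bio, 0);
 *	if (!err)
 *		err = __blkdev_issue_zeroout(bdev, s2, n2, GFP_NOFS,
 *					     &bio, 0);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */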

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
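	/*
	 * The offload path can fail at I/O time even though the queue
	 * limits advertised support.  Unless the caller forbade the
	 * fallback, retry once by writing pages of zeroes explicitly.
	 */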
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
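
/*
 * Example (illustrative sketch): probing for a zeroing offload without
 * falling back to writing zero pages.  -EOPNOTSUPP tells the caller it
 * must zero the range by other means.
 *
 *	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 *	if (err == -EOPNOTSUPP)
 *		(fall back to a manual zeroing strategy)
 */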