xref: /openbmc/linux/block/blk-lib.c (revision 4f2c0a4acffbec01079c28f839422e64ddeff004)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity
	 * if the bio needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the bio
	 * at discard granularity boundaries easier in the driver if needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}

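/*
 * Worked example (illustrative numbers, not from the original code): with a
 * 1 MiB discard granularity, discard_granularity >> SECTOR_SHIFT is 2048
 * sectors.  For sector == 3000, round_up(3000, 2048) == 4096, so the first
 * bio is limited to 4096 - 3000 = 1096 sectors and the next bio starts
 * aligned at sector 4096.  For an already aligned sector the limit becomes
 * round_down(UINT_MAX, 1048576) >> SECTOR_SHIFT == 8386560 sectors, the
 * largest granularity-aligned length that still fits in bi_size.
 */
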
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (bdev_read_only(bdev))
		return -EPERM;
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
		pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
				   bdev);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects =
			min(nr_sects, bio_discard_limit(bdev, sector));

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

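/*
 * Illustrative sketch, not part of the original file: how a caller might
 * discard an entire device through blkdev_issue_discard().  The helper name
 * is hypothetical and only documents the calling convention.
 */
static int __maybe_unused example_discard_whole_bdev(struct block_device *bdev)
{
	/*
	 * Discarding a whole device (as mkfs does) may generate many bios;
	 * blkdev_issue_discard() splits the range and waits for completion.
	 */
	return blkdev_issue_discard(bdev, 0, bdev_nr_sectors(bdev), GFP_KERNEL);
}
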
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}

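/*
 * Worked example (illustrative numbers, not from the original code): with
 * 4 KiB pages, PAGE_SIZE / 512 == 8, so nr_sects == 7 still maps to one
 * page, while a very large range is capped at BIO_MAX_VECS pages (256 in
 * current kernels) so the result always fits in a single bio.
 */
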
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue zeroout against
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

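/*
 * Illustrative sketch, not part of the original file: batching two ranges
 * through __blkdev_issue_zeroout() and submitting the resulting bio chain
 * once.  The helper name and the hard-coded ranges are hypothetical.
 */
static int __maybe_unused example_zero_two_ranges(struct block_device *bdev)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, 0, 8, GFP_KERNEL, &bio, 0);
	if (!ret)
		ret = __blkdev_issue_zeroout(bdev, 1024, 8, GFP_KERNEL, &bio, 0);
	if (!ret && bio) {
		/* Earlier bios are chained; waiting on the last waits for all. */
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
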
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);

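/*
 * Illustrative sketch, not part of the original file: requesting a zeroing
 * offload only, by passing BLKDEV_ZERO_NOFALLBACK so no zero pages are
 * written when the device lacks WRITE ZEROES.  The helper name is
 * hypothetical.
 */
static int __maybe_unused example_zero_range_offload_only(struct block_device *bdev,
		sector_t sector, sector_t nr_sects)
{
	int ret;

	ret = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				   BLKDEV_ZERO_NOFALLBACK);
	if (ret == -EOPNOTSUPP)
		pr_debug("%pg: no zeroing offload available\n", bdev);
	return ret;
}
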
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	for (;;) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		if (!nr_sects) {
			ret = submit_bio_wait(bio);
			bio_put(bio);
			break;
		}
		cond_resched();
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
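
/*
 * Illustrative sketch, not part of the original file: erasing a whole device
 * only when the queue advertises secure erase support.  The helper name is
 * hypothetical; blkdev_issue_secure_erase() itself also returns -EOPNOTSUPP
 * when the advertised limit is zero.
 */
static int __maybe_unused example_secure_erase_whole_bdev(struct block_device *bdev)
{
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;
	return blkdev_issue_secure_erase(bdev, 0, bdev_nr_sectors(bdev),
					 GFP_KERNEL);
}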