v4.6
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio_batch {
	atomic_t		done;
	int			error;
	struct completion	*wait;
};

static void bio_batch_end_io(struct bio *bio)
{
	struct bio_batch *bb = bio->bi_private;

	if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
		bb->error = bio->bi_error;
	if (atomic_dec_and_test(&bb->done))
		complete(bb->wait);
	bio_put(bio);
}

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	int type = REQ_WRITE | REQ_DISCARD;
	unsigned int granularity;
	int alignment;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;
	struct blk_plug plug;

	if (!q)
		return -ENXIO;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= REQ_SECURE;
	}

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		atomic_inc(&bb.done);
		submit_bio(type, bio);

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}
	blk_finish_plug(&plug);

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
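/*
 * Illustrative sketch (not part of this file): how a caller might discard
 * a whole block device with the v4.6 API above. The helper name and the
 * use of i_size_read() to size the device are assumptions for the example.
 */
static int example_discard_whole_bdev(struct block_device *bdev)
{
	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;

	if (!nr_sects)
		return -ENXIO;

	/* Non-secure discard: pass 0 instead of BLKDEV_DISCARD_SECURE. */
	return blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
}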

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
			    sector_t nr_sects, gfp_t gfp_mask,
			    struct page *page)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio_batch bb;
	struct bio *bio;
	int ret = 0;

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	while (nr_sects) {
		bio = bio_alloc(gfp_mask, 1);
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_bdev = bdev;
		bio->bi_private = &bb;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}

		atomic_inc(&bb.done);
		submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
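/*
 * Illustrative sketch (not part of this file): replicating one logical
 * block of data across a range with WRITE SAME, assuming the device
 * reports support via bdev_write_same(). The helper name is made up for
 * the example; the page must hold at least one logical block of data.
 */
static int example_write_same_range(struct block_device *bdev,
				    sector_t sector, sector_t nr_sects,
				    struct page *pattern)
{
	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				       pattern);
}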

/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects, gfp_t gfp_mask)
{
	int ret;
	struct bio *bio;
	struct bio_batch bb;
	unsigned int sz;
	DECLARE_COMPLETION_ONSTACK(wait);

	atomic_set(&bb.done, 1);
	bb.error = 0;
	bb.wait = &wait;

	ret = 0;
	while (nr_sects != 0) {
		bio = bio_alloc(gfp_mask,
				min(nr_sects, (sector_t)BIO_MAX_PAGES));
		if (!bio) {
			ret = -ENOMEM;
			break;
		}

		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev   = bdev;
		bio->bi_end_io = bio_batch_end_io;
		bio->bi_private = &bb;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= ret >> 9;
			sector += ret >> 9;
			if (ret < (sz << 9))
				break;
		}
		ret = 0;
		atomic_inc(&bb.done);
		submit_bio(WRITE, bio);
	}

	/* Wait for bios in-flight */
	if (!atomic_dec_and_test(&bb.done))
		wait_for_completion_io(&wait);

	if (bb.error)
		return bb.error;
	return ret;
}

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range.  If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded. Should
 *  the discard request fail, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function will resort to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them. If the block device supports the WRITE SAME command
 *  blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range. Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (discard && blk_queue_discard(q) && q->limits.discard_zeroes_data &&
	    blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, 0) == 0)
		return 0;

	if (bdev_write_same(bdev) &&
	    blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
				    ZERO_PAGE(0)) == 0)
		return 0;

	return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
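/*
 * Illustrative sketch (not part of this file): zeroing a range and letting
 * the kernel pick the cheapest mechanism. With discard=true the range is
 * discarded only when discard_zeroes_data guarantees zeroed reads;
 * otherwise WRITE SAME or plain zero writes are used. The helper name is
 * made up for the example.
 */
static int example_zero_range(struct block_device *bdev, sector_t sector,
			      sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL, true);
}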
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity if
	 * it needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the bio
	 * at discard granularity boundaries easier in the driver if needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}
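/*
 * Worked example (added for illustration, not in the original file):
 * with a 1 MiB discard granularity (2048 sectors) and a requested start
 * of absolute sector 3000, round_up(3000, 2048) = 4096, so the first bio
 * is capped at 4096 - 3000 = 1096 sectors; every later bio then starts on
 * a granularity boundary and is sized to round_down(UINT_MAX, 1 MiB) bytes.
 */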

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask)
{
	sector_t bio_sects = min(*nr_sects, bio_discard_limit(bdev, *sector));
	struct bio *bio;

	if (!bio_sects)
		return NULL;

	bio = bio_alloc(bdev, 0, REQ_OP_DISCARD, gfp_mask);
	if (!bio)
		return NULL;
	bio->bi_iter.bi_sector = *sector;
	bio->bi_iter.bi_size = bio_sects << SECTOR_SHIFT;
	*sector += bio_sects;
	*nr_sects -= bio_sects;
	/*
	 * We can loop for a long time in here if someone does full device
	 * discards (like mkfs).  Be nice and allow us to schedule out to avoid
	 * softlocking if preempt is disabled.
	 */
	cond_resched();
	return bio;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio;

	while ((bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
			gfp_mask)))
		*biop = bio_chain_and_submit(*biop, bio);
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
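/*
 * Illustrative sketch (not part of this file): discarding a whole block
 * device with the current four-argument API. The helper name is made up
 * for the example; bdev_max_discard_sectors() == 0 means the device does
 * not support discard at all.
 */
static int example_discard_whole_bdev(struct block_device *bdev)
{
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	return blkdev_issue_discard(bdev, 0, bdev_nr_sectors(bdev),
				    GFP_KERNEL);
}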

static sector_t bio_write_zeroes_limit(struct block_device *bdev)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;

	return min(bdev_write_zeroes_sectors(bdev),
		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
}

/*
 * There is no reliable way for the SCSI subsystem to determine whether a
 * device supports a WRITE SAME operation without actually performing a write
 * to media. As a result, write_zeroes is enabled by default and will be
 * disabled if a zeroing operation subsequently fails. This means that this
 * queue limit is likely to change at runtime.
 */
static void __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags, sector_t limit)
{
	while (nr_sects) {
		unsigned int len = min(nr_sects, limit);
		struct bio *bio;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		bio = bio_alloc(bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		bio->bi_iter.bi_size = len << SECTOR_SHIFT;
		*biop = bio_chain_and_submit(*biop, bio);

		nr_sects -= len;
		sector += len;
		cond_resched();
	}
}

static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	blk_start_plug(&plug);
	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio,
			flags, limit);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	/*
	 * For some devices there is no non-destructive way to verify whether
	 * WRITE ZEROES is actually supported.  These will clear the capability
	 * on an I/O error, in which case we'll turn any error into
	 * "not supported" here.
	 */
	if (ret && !bdev_write_zeroes_sectors(bdev))
		return -EOPNOTSUPP;
	return ret;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
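/*
 * Worked example (added for illustration, not in the original file):
 * with 4 KiB pages, PAGE_SIZE / 512 = 8 sectors per page, so nr_sects = 1
 * yields DIV_ROUND_UP(1, 8) = 1 page, and nr_sects = 1 << 20 (512 MiB)
 * yields 131072 pages, clamped to BIO_MAX_VECS (256).
 */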

static void __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned int flags)
{
	while (nr_sects) {
		unsigned int nr_vecs = __blkdev_sectors_to_bio_pages(nr_sects);
		struct bio *bio;

		bio = bio_alloc(bdev, nr_vecs, REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current))
			break;

		do {
			unsigned int len, added;

			len = min_t(sector_t,
				PAGE_SIZE, nr_sects << SECTOR_SHIFT);
			added = bio_add_page(bio, ZERO_PAGE(0), len, 0);
			if (added < len)
				break;
			nr_sects -= added >> SECTOR_SHIFT;
			sector += added >> SECTOR_SHIFT;
		} while (nr_sects);

		*biop = bio_chain_and_submit(*biop, bio);
		cond_resched();
	}
}

static int blkdev_issue_zero_pages(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp, unsigned flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	if (flags & BLKDEV_ZERO_NOFALLBACK)
		return -EOPNOTSUPP;

	blk_start_plug(&plug);
	__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp, &bio, flags);
	if (bio) {
		if ((flags & BLKDEV_ZERO_KILLABLE) &&
		    fatal_signal_pending(current)) {
			bio_await_chain(bio);
			blk_finish_plug(&plug);
			return -EINTR;
		}
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}

/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	sector_t limit = bio_write_zeroes_limit(bdev);

	if (bdev_read_only(bdev))
		return -EPERM;

	if (limit) {
		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, biop, flags, limit);
	} else {
		if (flags & BLKDEV_ZERO_NOFALLBACK)
			return -EOPNOTSUPP;
		__blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
				biop, flags);
	}
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;

	if ((sector | nr_sects) & ((bdev_logical_block_size(bdev) >> 9) - 1))
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	if (bdev_write_zeroes_sectors(bdev)) {
		ret = blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, flags);
		if (ret != -EOPNOTSUPP)
			return ret;
	}

	return blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask, flags);
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
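/*
 * Illustrative sketch (not part of this file): zeroing a range while
 * keeping the blocks provisioned, so the device may not simply deallocate
 * them. The helper name is made up for the example; sector and nr_sects
 * must be aligned to the logical block size or -EINVAL is returned.
 */
static int example_zero_provisioned(struct block_device *bdev,
				    sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}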

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	while (nr_sects) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		cond_resched();
	}
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
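/*
 * Illustrative sketch (not part of this file): securely erasing a whole
 * device, in the spirit of what the BLKSECDISCARD ioctl path does for a
 * full-device range. The helper name is made up for the example; the call
 * fails with -EOPNOTSUPP on devices without secure-erase support.
 */
static int example_secure_erase_whole_bdev(struct block_device *bdev)
{
	return blkdev_issue_secure_erase(bdev, 0, bdev_nr_sectors(bdev),
					 GFP_KERNEL);
}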