block/blk-lib.c (v6.2)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity if
	 * it needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting the bio
	 * at discard granularity boundaries easier in the driver if needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}
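
The first return path above caps a misaligned discard at the next granularity boundary; the second caps an aligned one at the largest granularity-aligned byte count that still fits in the 32-bit bi_size. A minimal userspace sketch of that arithmetic, assuming a power-of-two granularity and the kernel's SECTOR_SHIFT of 9 (demo_discard_limit is a hypothetical name, not a kernel symbol):

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

/* Power-of-two alignment helpers mirroring the kernel's round_up/round_down */
#define round_up(x, y)   ((((x) - 1) | ((uint64_t)(y) - 1)) + 1)
#define round_down(x, y) ((x) & ~((uint64_t)(y) - 1))

static uint64_t demo_discard_limit(uint64_t sector, uint32_t granularity_bytes)
{
	uint64_t gran_sectors = granularity_bytes >> SECTOR_SHIFT;
	uint64_t aligned = round_up(sector, gran_sectors);

	if (aligned != sector)		/* cap the first bio at the boundary */
		return aligned - sector;
	/* already aligned: largest granularity-aligned size that fits bi_size */
	return round_down(UINT32_MAX, granularity_bytes) >> SECTOR_SHIFT;
}

int main(void)
{
	/* 1 MiB granularity = 2048 sectors; sector 1000 is 1048 short of it */
	printf("%llu\n", (unsigned long long)demo_discard_limit(1000, 1 << 20));
	printf("%llu\n", (unsigned long long)demo_discard_limit(2048, 1 << 20));
	return 0;
}
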
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (bdev_read_only(bdev))
		return -EPERM;
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
		pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
				   bdev);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects =
			min(nr_sects, bio_discard_limit(bdev, sector));

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
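
A hedged sketch of a caller, under the v6.2 signature above; example_discard_range and its byte-based interface are hypothetical, and both offsets must already be multiples of the logical block size:

#include <linux/blkdev.h>

/* Hypothetical wrapper: discard a byte range on an already-resolved bdev. */
static int example_discard_range(struct block_device *bdev, loff_t start,
				 loff_t len)
{
	return blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
				    len >> SECTOR_SHIFT, GFP_KERNEL);
}
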
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
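
The loop above carves nr_sects into chunks no larger than the device's write-zeroes limit; the secure-erase helper further down uses the same pattern. A runnable userspace sketch of just the splitting (demo_split is illustrative, not kernel code):

#include <stdio.h>
#include <stdint.h>

static void demo_split(uint64_t sector, uint64_t nr_sects, uint32_t max_sectors)
{
	while (nr_sects) {
		uint64_t len = nr_sects > max_sectors ? max_sectors : nr_sects;

		printf("bio: sector=%llu len=%llu\n",
		       (unsigned long long)sector, (unsigned long long)len);
		sector += len;
		nr_sects -= len;
	}
}

int main(void)
{
	demo_split(0, 10000, 4096);	/* chunks of 4096, 4096, 1808 */
	return 0;
}
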
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
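
The conversion rounds up so even a single 512B sector still gets one page, then clamps to the per-bio vector limit. A userspace check of the arithmetic, assuming 4 KiB pages and a BIO_MAX_VECS of 256 (the v6.x value):

#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE		4096u
#define DEMO_BIO_MAX_VECS	256u

static unsigned int demo_sectors_to_bio_pages(uint64_t nr_sects)
{
	uint64_t per_page = DEMO_PAGE_SIZE / 512;
	uint64_t pages = (nr_sects + per_page - 1) / per_page; /* DIV_ROUND_UP */

	return pages < DEMO_BIO_MAX_VECS ? (unsigned int)pages
					 : DEMO_BIO_MAX_VECS;
}

int main(void)
{
	printf("%u\n", demo_sectors_to_bio_pages(1));		/* 1 */
	printf("%u\n", demo_sectors_to_bio_pages(9));		/* 2 */
	printf("%u\n", demo_sectors_to_bio_pages(1 << 20));	/* 256 (capped) */
	return 0;
}
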
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			/* A short add means the bio is full: start a new one. */
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
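
A hedged caller sketch for the interface above: zero a region while keeping its blocks provisioned. example_zero_metadata is hypothetical; BLKDEV_ZERO_NOUNMAP asks the offload path not to unmap the range:

#include <linux/blkdev.h>

/* Hypothetical helper: zero a region but keep its blocks allocated. */
static int example_zero_metadata(struct block_device *bdev, sector_t start,
				 sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}
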
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	for (;;) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		if (!nr_sects) {
			ret = submit_bio_wait(bio);
			bio_put(bio);
			break;
		}
		cond_resched();
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
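
The clamp at the top of blkdev_issue_secure_erase() keeps len << SECTOR_SHIFT within the 32-bit bi_size and aligns the limit to the logical block size. A small userspace check of that clamp, with illustrative values:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

int main(void)
{
	uint32_t lbs = 4096;			/* logical block size */
	uint64_t bs_mask = (lbs >> 9) - 1;	/* 7 for 4K blocks */
	uint64_t max_sectors = UINT32_MAX;	/* device claims no limit */

	/* make sure "len << SECTOR_SHIFT" doesn't overflow a u32 bi_size */
	if (max_sectors > (UINT32_MAX >> SECTOR_SHIFT))
		max_sectors = UINT32_MAX >> SECTOR_SHIFT;	/* 8388607 */
	max_sectors &= ~bs_mask;				/* 8388600 */

	printf("max_sectors=%llu\n", (unsigned long long)max_sectors);
	return 0;
}
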
block/blk-lib.c (v4.10.11)
 
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	/*
	 * Chain the previous bio to the new one and submit it; waiting on
	 * the final bio of the chain then waits for all of them.
	 */
	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (flags & BLKDEV_DISCARD_ZERO)
			return -EOPNOTSUPP;
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		if ((flags & BLKDEV_DISCARD_ZERO) &&
		    !q->limits.discard_zeroes_data)
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same.  */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
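
Unlike the v6.2 helper, this version trims a split discard back to the previous granularity-aligned boundary (offset by the device's discard alignment) using sector_div(). A runnable userspace sketch of the trim, with illustrative names:

#include <stdio.h>
#include <stdint.h>

static uint64_t demo_trim_end(uint64_t sector, uint64_t req_sects,
			      uint64_t nr_sects, uint64_t granularity,
			      uint64_t alignment)
{
	uint64_t end_sect = sector + req_sects;

	/* Only trim when splitting and the split point is misaligned. */
	if (req_sects < nr_sects && end_sect % granularity != alignment)
		end_sect = ((end_sect - alignment) / granularity) * granularity
			   + alignment;
	return end_sect;
}

int main(void)
{
	/*
	 * granularity 2048, alignment 0: a 5000-sector chunk of a longer
	 * discard starting at sector 0 is trimmed to end at 4096.
	 */
	printf("%llu\n", (unsigned long long)
	       demo_trim_end(0, 5000, 100000, 2048, 0));
	return 0;
}
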
/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_IFL_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP && !(flags & BLKDEV_DISCARD_ZERO))
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

/**
 * __blkdev_issue_write_zeroes - generate a number of bios with WRITE ZEROES
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_ZEROES) with zero-filled
 *  pages.
 */
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE_ZEROES, 0);

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @discard:	discard flag
 *
 * Description:
 *  Generate and issue a number of bios with zero-filled pages.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		bool discard)
{
	int ret;
	int bi_size = 0;
	struct bio *bio = *biop;
	unsigned int sz;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop);
	if (ret == 0 || (ret && ret != -EOPNOTSUPP))
		goto out;

	ret = 0;
	while (nr_sects != 0) {
		bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),
				gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE >> 9, nr_sects);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < (sz << 9))
				break;
		}
		cond_resched();
	}

	*biop = bio;
out:
	return ret;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @discard:	whether to discard the block range
 *
 * Description:
 *  Zero-fill a block range.  If the discard flag is set and the block
 *  device guarantees that subsequent READ operations to the block range
 *  in question will return zeroes, the blocks will be discarded. Should
 *  the discard request fail, if the discard flag is not set, or if
 *  discard_zeroes_data is not supported, this function will resort to
 *  zeroing the blocks manually, thus provisioning (allocating,
 *  anchoring) them. If the block device supports WRITE ZEROES or WRITE SAME
 *  command(s), blkdev_issue_zeroout() will use it to optimize the process of
 *  clearing the block range. Otherwise the zeroing will be performed
 *  using regular WRITE calls.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
			 sector_t nr_sects, gfp_t gfp_mask, bool discard)
{
	int ret;
	struct bio *bio = NULL;
	struct blk_plug plug;

	if (discard) {
		if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
				BLKDEV_DISCARD_ZERO))
			return 0;
	}

	if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
			ZERO_PAGE(0)))
		return 0;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
			&bio, discard);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
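
For contrast with the v6.2 flags-based interface, a hedged sketch of a v4.10-era caller; example_zero_range_v410 is hypothetical, and discard=true only short-circuits to a discard when the device guarantees discarded blocks read back as zeroes (BLKDEV_DISCARD_ZERO):

#include <linux/blkdev.h>

/* Hypothetical wrapper over the v4.10 bool-discard interface. */
static int example_zero_range_v410(struct block_device *bdev, sector_t start,
				   sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_NOFS, true);
}
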