v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to generic helper functions
  4 */
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/scatterlist.h>
 10
 11#include "blk.h"
 12
 13static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
 14		gfp_t gfp)
 15{
 16	struct bio *new = bio_alloc(gfp, nr_pages);
 17
 18	if (bio) {
 19		bio_chain(bio, new);
 20		submit_bio(bio);
 21	}
 22
 23	return new;
 24}
 25
 26int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 27		sector_t nr_sects, gfp_t gfp_mask, int flags,
 28		struct bio **biop)
 29{
 30	struct request_queue *q = bdev_get_queue(bdev);
 31	struct bio *bio = *biop;
 32	unsigned int granularity;
 33	unsigned int op;
 34	int alignment;
 35	sector_t bs_mask;
 36
 37	if (!q)
 38		return -ENXIO;
 39
 40	if (bdev_read_only(bdev))
 41		return -EPERM;
 42
 43	if (flags & BLKDEV_DISCARD_SECURE) {
 44		if (!blk_queue_secure_erase(q))
 45			return -EOPNOTSUPP;
 46		op = REQ_OP_SECURE_ERASE;
 47	} else {
 48		if (!blk_queue_discard(q))
 49			return -EOPNOTSUPP;
 50		op = REQ_OP_DISCARD;
 51	}
 52
 53	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
 54	if ((sector | nr_sects) & bs_mask)
 55		return -EINVAL;
 56
 57	/* Zero-sector (unknown) and one-sector granularities are the same.  */
 58	granularity = max(q->limits.discard_granularity >> 9, 1U);
 59	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
 60
 61	while (nr_sects) {
 62		unsigned int req_sects;
 63		sector_t end_sect, tmp;
 64
 65		/* Make sure bi_size doesn't overflow */
 66		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);
 67
 68		/**
 69		 * If splitting a request, and the next starting sector would be
 70		 * misaligned, stop the discard at the previous aligned sector.
 71		 */
 72		end_sect = sector + req_sects;
 73		tmp = end_sect;
 74		if (req_sects < nr_sects &&
 75		    sector_div(tmp, granularity) != alignment) {
 76			end_sect = end_sect - alignment;
 77			sector_div(end_sect, granularity);
 78			end_sect = end_sect * granularity + alignment;
 79			req_sects = end_sect - sector;
 80		}
 81
 82		bio = next_bio(bio, 0, gfp_mask);
 83		bio->bi_iter.bi_sector = sector;
 84		bio_set_dev(bio, bdev);
 85		bio_set_op_attrs(bio, op, 0);
 86
 87	bio->bi_iter.bi_size = req_sects << 9;
 88		nr_sects -= req_sects;
 89		sector = end_sect;
 90
 91		/*
 92		 * We can loop for a long time in here, if someone does
 93		 * full device discards (like mkfs). Be nice and allow
 94		 * us to schedule out to avoid softlocking if preempt
 95		 * is disabled.
 96		 */
 97		cond_resched();
 98	}
 99
100	*biop = bio;
101	return 0;
102}
103EXPORT_SYMBOL(__blkdev_issue_discard);
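A worked illustration of the split arithmetic above, with assumed numbers (not taken from the source): suppose discard_granularity is 1 MiB, so granularity = 2048 sectors, alignment = 0, sector = 0 and nr_sects = 10,000,000. The first pass caps req_sects at UINT_MAX >> 9 = 8,388,607. Because 8,388,607 % 2048 = 2047, which differs from alignment, end_sect is rounded down to 4095 * 2048 = 8,386,560; the first bio therefore covers 8,386,560 sectors and the next bio starts on a granularity-aligned sector.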
104
105/**
106 * blkdev_issue_discard - queue a discard
107 * @bdev:	blockdev to issue discard for
108 * @sector:	start sector
109 * @nr_sects:	number of sectors to discard
110 * @gfp_mask:	memory allocation flags (for bio_alloc)
111 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
112 *
113 * Description:
114 *    Issue a discard request for the sectors in question.
115 */
116int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
117		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
118{
119	struct bio *bio = NULL;
120	struct blk_plug plug;
121	int ret;
122
123	blk_start_plug(&plug);
124	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
125			&bio);
126	if (!ret && bio) {
127		ret = submit_bio_wait(bio);
128		if (ret == -EOPNOTSUPP)
129			ret = 0;
130		bio_put(bio);
131	}
132	blk_finish_plug(&plug);
133
134	return ret;
135}
136EXPORT_SYMBOL(blkdev_issue_discard);
137
138/**
139 * __blkdev_issue_write_same - generate number of bios with same page
140 * @bdev:	target blockdev
141 * @sector:	start sector
142 * @nr_sects:	number of sectors to write
143 * @gfp_mask:	memory allocation flags (for bio_alloc)
144 * @page:	page containing data to write
145 * @biop:	pointer to anchor bio
146 *
147 * Description:
148 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
149 */
150static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
151		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
152		struct bio **biop)
153{
154	struct request_queue *q = bdev_get_queue(bdev);
155	unsigned int max_write_same_sectors;
156	struct bio *bio = *biop;
157	sector_t bs_mask;
158
159	if (!q)
160		return -ENXIO;
161
162	if (bdev_read_only(bdev))
163		return -EPERM;
164
165	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
166	if ((sector | nr_sects) & bs_mask)
167		return -EINVAL;
168
169	if (!bdev_write_same(bdev))
170		return -EOPNOTSUPP;
171
172	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
173	max_write_same_sectors = UINT_MAX >> 9;
174
175	while (nr_sects) {
176		bio = next_bio(bio, 1, gfp_mask);
177		bio->bi_iter.bi_sector = sector;
178		bio_set_dev(bio, bdev);
179		bio->bi_vcnt = 1;
180		bio->bi_io_vec->bv_page = page;
181		bio->bi_io_vec->bv_offset = 0;
182		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
183		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
184
185		if (nr_sects > max_write_same_sectors) {
186			bio->bi_iter.bi_size = max_write_same_sectors << 9;
187			nr_sects -= max_write_same_sectors;
188			sector += max_write_same_sectors;
189		} else {
190			bio->bi_iter.bi_size = nr_sects << 9;
191			nr_sects = 0;
192		}
193		cond_resched();
194	}
195
196	*biop = bio;
197	return 0;
198}
199
200/**
201 * blkdev_issue_write_same - queue a write same operation
202 * @bdev:	target blockdev
203 * @sector:	start sector
204 * @nr_sects:	number of sectors to write
205 * @gfp_mask:	memory allocation flags (for bio_alloc)
206 * @page:	page containing data
207 *
208 * Description:
209 *    Issue a write same request for the sectors in question.
210 */
211int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
212				sector_t nr_sects, gfp_t gfp_mask,
213				struct page *page)
214{
215	struct bio *bio = NULL;
216	struct blk_plug plug;
217	int ret;
218
219	blk_start_plug(&plug);
220	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
221			&bio);
222	if (ret == 0 && bio) {
223		ret = submit_bio_wait(bio);
224		bio_put(bio);
225	}
226	blk_finish_plug(&plug);
227	return ret;
228}
229EXPORT_SYMBOL(blkdev_issue_write_same);
230
231static int __blkdev_issue_write_zeroes(struct block_device *bdev,
232		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
233		struct bio **biop, unsigned flags)
234{
235	struct bio *bio = *biop;
236	unsigned int max_write_zeroes_sectors;
237	struct request_queue *q = bdev_get_queue(bdev);
238
239	if (!q)
240		return -ENXIO;
241
242	if (bdev_read_only(bdev))
243		return -EPERM;
244
245	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
246	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
247
248	if (max_write_zeroes_sectors == 0)
249		return -EOPNOTSUPP;
250
251	while (nr_sects) {
252		bio = next_bio(bio, 0, gfp_mask);
253		bio->bi_iter.bi_sector = sector;
254		bio_set_dev(bio, bdev);
255		bio->bi_opf = REQ_OP_WRITE_ZEROES;
256		if (flags & BLKDEV_ZERO_NOUNMAP)
257			bio->bi_opf |= REQ_NOUNMAP;
258
259		if (nr_sects > max_write_zeroes_sectors) {
260			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
261			nr_sects -= max_write_zeroes_sectors;
262			sector += max_write_zeroes_sectors;
263		} else {
264			bio->bi_iter.bi_size = nr_sects << 9;
265			nr_sects = 0;
266		}
267		cond_resched();
268	}
269
270	*biop = bio;
271	return 0;
272}
273
274/*
275 * Convert a number of 512B sectors to a number of pages.
276 * The result is limited to a number of pages that can fit into a BIO.
277 * Also make sure that the result is always at least 1 (page) for the cases
278 * where nr_sects is lower than the number of sectors in a page.
279 */
280static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
281{
282	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);
283
284	return min(pages, (sector_t)BIO_MAX_PAGES);
285}
286
287static int __blkdev_issue_zero_pages(struct block_device *bdev,
288		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
289		struct bio **biop)
290{
291	struct request_queue *q = bdev_get_queue(bdev);
292	struct bio *bio = *biop;
293	int bi_size = 0;
294	unsigned int sz;
295
296	if (!q)
297		return -ENXIO;
298
299	if (bdev_read_only(bdev))
300		return -EPERM;
301
302	while (nr_sects != 0) {
303		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
304			       gfp_mask);
305		bio->bi_iter.bi_sector = sector;
306		bio_set_dev(bio, bdev);
307		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
308
309		while (nr_sects != 0) {
310			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
311			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
312			nr_sects -= bi_size >> 9;
313			sector += bi_size >> 9;
314			if (bi_size < sz)
315				break;
316		}
317		cond_resched();
318	}
319
320	*biop = bio;
321	return 0;
322}
323
324/**
325 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
326 * @bdev:	blockdev to issue
327 * @sector:	start sector
328 * @nr_sects:	number of sectors to write
329 * @gfp_mask:	memory allocation flags (for bio_alloc)
330 * @biop:	pointer to anchor bio
331 * @flags:	controls detailed behavior
332 *
333 * Description:
334 *  Zero-fill a block range, either using hardware offload or by explicitly
335 *  writing zeroes to the device.
336 *
337 *  If a device is using logical block provisioning, the underlying space will
338 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
339 *
340 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
341 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
342 */
343int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
344		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
345		unsigned flags)
346{
347	int ret;
348	sector_t bs_mask;
349
350	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
351	if ((sector | nr_sects) & bs_mask)
352		return -EINVAL;
353
354	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
355			biop, flags);
356	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
357		return ret;
358
359	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
360					 biop);
361}
362EXPORT_SYMBOL(__blkdev_issue_zeroout);
363
364/**
365 * blkdev_issue_zeroout - zero-fill a block range
366 * @bdev:	blockdev to write
367 * @sector:	start sector
368 * @nr_sects:	number of sectors to write
369 * @gfp_mask:	memory allocation flags (for bio_alloc)
370 * @flags:	controls detailed behavior
371 *
372 * Description:
373 *  Zero-fill a block range, either using hardware offload or by explicitly
374 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
375 *  valid values for %flags.
376 */
377int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
378		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
379{
380	int ret = 0;
381	sector_t bs_mask;
382	struct bio *bio;
383	struct blk_plug plug;
384	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);
385
386	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
387	if ((sector | nr_sects) & bs_mask)
388		return -EINVAL;
389
390retry:
391	bio = NULL;
392	blk_start_plug(&plug);
393	if (try_write_zeroes) {
394		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
395						  gfp_mask, &bio, flags);
396	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
397		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
398						gfp_mask, &bio);
399	} else {
400		/* No zeroing offload support */
401		ret = -EOPNOTSUPP;
402	}
403	if (ret == 0 && bio) {
404		ret = submit_bio_wait(bio);
405		bio_put(bio);
406	}
407	blk_finish_plug(&plug);
408	if (ret && try_write_zeroes) {
409		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
410			try_write_zeroes = false;
411			goto retry;
412		}
413		if (!bdev_write_zeroes_sectors(bdev)) {
414			/*
415			 * Zeroing offload support was indicated, but the
416			 * device reported ILLEGAL REQUEST (for some devices
417			 * there is no non-destructive way to verify whether
418			 * WRITE ZEROES is actually supported).
419			 */
420			ret = -EOPNOTSUPP;
421		}
422	}
423
424	return ret;
425}
426EXPORT_SYMBOL(blkdev_issue_zeroout);
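For reference, a minimal caller sketch of the two exported v4.17 entry points above. The function name, device and range are hypothetical, not part of blk-lib.c:

/* Hypothetical example: discard, then zero, the first 1 MiB of a device. */
static int example_trim_and_zero(struct block_device *bdev)
{
	sector_t start = 0;
	sector_t nr = 2048;	/* 2048 x 512 B sectors = 1 MiB (assumed range) */
	int ret;

	/* 0 = no BLKDEV_DISCARD_* flags, i.e. a normal, non-secure discard */
	ret = blkdev_issue_discard(bdev, start, nr, GFP_KERNEL, 0);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	/* 0 = unmapping and the zero-page fallback are both allowed */
	return blkdev_issue_zeroout(bdev, start, nr, GFP_KERNEL, 0);
}

Both helpers plug the queue, chain the bios they build and wait for completion, so a caller only deals with a single return code.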
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Functions related to generic helper functions
  4 */
  5#include <linux/kernel.h>
  6#include <linux/module.h>
  7#include <linux/bio.h>
  8#include <linux/blkdev.h>
  9#include <linux/scatterlist.h>
 10
 11#include "blk.h"
 12
 13struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
 14{
 15	struct bio *new = bio_alloc(gfp, nr_pages);
 16
 17	if (bio) {
 18		bio_chain(bio, new);
 19		submit_bio(bio);
 20	}
 21
 22	return new;
 23}
 24
 25int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 26		sector_t nr_sects, gfp_t gfp_mask, int flags,
 27		struct bio **biop)
 28{
 29	struct request_queue *q = bdev_get_queue(bdev);
 30	struct bio *bio = *biop;
 31	unsigned int op;
 32	sector_t bs_mask, part_offset = 0;
 33
 34	if (!q)
 35		return -ENXIO;
 36
 37	if (bdev_read_only(bdev))
 38		return -EPERM;
 39
 40	if (flags & BLKDEV_DISCARD_SECURE) {
 41		if (!blk_queue_secure_erase(q))
 42			return -EOPNOTSUPP;
 43		op = REQ_OP_SECURE_ERASE;
 44	} else {
 45		if (!blk_queue_discard(q))
 46			return -EOPNOTSUPP;
 47		op = REQ_OP_DISCARD;
 48	}
 49
 50	/* In case the discard granularity isn't set by a buggy device driver */
 51	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
 52		char dev_name[BDEVNAME_SIZE];
 53
 54		bdevname(bdev, dev_name);
 55		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
 56		return -EOPNOTSUPP;
 57	}
 58
 59	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
 60	if ((sector | nr_sects) & bs_mask)
 61		return -EINVAL;
 62
 63	if (!nr_sects)
 64		return -EINVAL;
 65
 66	/* In case the discard request is in a partition */
 67	if (bdev->bd_partno)
 68		part_offset = bdev->bd_part->start_sect;
 69
 70	while (nr_sects) {
 71		sector_t granularity_aligned_lba, req_sects;
 72		sector_t sector_mapped = sector + part_offset;
 73
 74		granularity_aligned_lba = round_up(sector_mapped,
 75				q->limits.discard_granularity >> SECTOR_SHIFT);
 76
 77		/*
 78		 * Check whether the discard bio starts at a discard_granularity
 79		 * aligned LBA,
 80		 * - If no: set (granularity_aligned_lba - sector_mapped) to
 81		 *   bi_size of the first split bio, then the second bio will
 82		 *   start at a discard_granularity aligned LBA on the device.
 83		 * - If yes: use bio_aligned_discard_max_sectors() as the max
 84		 *   possible bi_size of the first split bio. Then when this bio
 85		 *   is split in the device driver, the split bios are very likely
 86		 *   to be aligned to the discard_granularity of the device's queue.
 87		 */
 88		if (granularity_aligned_lba == sector_mapped)
 89			req_sects = min_t(sector_t, nr_sects,
 90					  bio_aligned_discard_max_sectors(q));
 91		else
 92			req_sects = min_t(sector_t, nr_sects,
 93					  granularity_aligned_lba - sector_mapped);
 94
 95		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
 96
 97		bio = blk_next_bio(bio, 0, gfp_mask);
 98		bio->bi_iter.bi_sector = sector;
 99		bio_set_dev(bio, bdev);
100		bio_set_op_attrs(bio, op, 0);
101
102		bio->bi_iter.bi_size = req_sects << 9;
103		sector += req_sects;
104	sector += req_sects;
105
106		/*
107		 * We can loop for a long time in here, if someone does
108		 * full device discards (like mkfs). Be nice and allow
109		 * us to schedule out to avoid softlocking if preempt
110		 * is disabled.
111		 */
112		cond_resched();
113	}
114
115	*biop = bio;
116	return 0;
117}
118EXPORT_SYMBOL(__blkdev_issue_discard);
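To make the splitting rule described in the comment above concrete, here is a standalone, userspace-style sketch (illustration only: the partition offset, start sector, queue limits and the open-coded ROUND_UP() are assumptions, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Open-coded stand-in for the kernel's round_up(), for this illustration. */
#define ROUND_UP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uint64_t part_offset = 34;	/* assumed partition start sector */
	uint64_t sector = 30;		/* discard start, relative to the partition */
	uint64_t nr_sects = 8192;	/* sectors left to discard */
	uint64_t granularity = 2048;	/* discard_granularity >> SECTOR_SHIFT = 1 MiB */
	uint64_t sector_mapped = sector + part_offset;			/* 64 */
	uint64_t aligned_lba = ROUND_UP(sector_mapped, granularity);	/* 2048 */
	uint64_t req_sects;

	if (aligned_lba == sector_mapped)
		/* Aligned start: the kernel caps this with bio_aligned_discard_max_sectors(). */
		req_sects = nr_sects;
	else
		/* Misaligned start: the first bio stops at the aligned LBA. */
		req_sects = aligned_lba - sector_mapped;		/* 1984 */

	if (req_sects > nr_sects)
		req_sects = nr_sects;

	printf("first bio: %llu sectors; next bio starts at device LBA %llu\n",
	       (unsigned long long)req_sects,
	       (unsigned long long)(sector_mapped + req_sects));
	return 0;
}

With these numbers the first bio covers 1984 sectors, and every later bio then starts on a discard_granularity boundary of the whole device, which is what the in-tree comment is aiming for.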
119
120/**
121 * blkdev_issue_discard - queue a discard
122 * @bdev:	blockdev to issue discard for
123 * @sector:	start sector
124 * @nr_sects:	number of sectors to discard
125 * @gfp_mask:	memory allocation flags (for bio_alloc)
126 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
127 *
128 * Description:
129 *    Issue a discard request for the sectors in question.
130 */
131int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
132		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
133{
134	struct bio *bio = NULL;
135	struct blk_plug plug;
136	int ret;
137
138	blk_start_plug(&plug);
139	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
140			&bio);
141	if (!ret && bio) {
142		ret = submit_bio_wait(bio);
143		if (ret == -EOPNOTSUPP)
144			ret = 0;
145		bio_put(bio);
146	}
147	blk_finish_plug(&plug);
148
149	return ret;
150}
151EXPORT_SYMBOL(blkdev_issue_discard);
152
153/**
154 * __blkdev_issue_write_same - generate number of bios with same page
155 * @bdev:	target blockdev
156 * @sector:	start sector
157 * @nr_sects:	number of sectors to write
158 * @gfp_mask:	memory allocation flags (for bio_alloc)
159 * @page:	page containing data to write
160 * @biop:	pointer to anchor bio
161 *
162 * Description:
163 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
164 */
165static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
166		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
167		struct bio **biop)
168{
169	struct request_queue *q = bdev_get_queue(bdev);
170	unsigned int max_write_same_sectors;
171	struct bio *bio = *biop;
172	sector_t bs_mask;
173
174	if (!q)
175		return -ENXIO;
176
177	if (bdev_read_only(bdev))
178		return -EPERM;
179
180	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
181	if ((sector | nr_sects) & bs_mask)
182		return -EINVAL;
183
184	if (!bdev_write_same(bdev))
185		return -EOPNOTSUPP;
186
187	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
188	max_write_same_sectors = bio_allowed_max_sectors(q);
189
190	while (nr_sects) {
191		bio = blk_next_bio(bio, 1, gfp_mask);
192		bio->bi_iter.bi_sector = sector;
193		bio_set_dev(bio, bdev);
194		bio->bi_vcnt = 1;
195		bio->bi_io_vec->bv_page = page;
196		bio->bi_io_vec->bv_offset = 0;
197		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
198		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);
199
200		if (nr_sects > max_write_same_sectors) {
201			bio->bi_iter.bi_size = max_write_same_sectors << 9;
202			nr_sects -= max_write_same_sectors;
203			sector += max_write_same_sectors;
204		} else {
205			bio->bi_iter.bi_size = nr_sects << 9;
206			nr_sects = 0;
207		}
208		cond_resched();
209	}
210
211	*biop = bio;
212	return 0;
213}
214
215/**
216 * blkdev_issue_write_same - queue a write same operation
217 * @bdev:	target blockdev
218 * @sector:	start sector
219 * @nr_sects:	number of sectors to write
220 * @gfp_mask:	memory allocation flags (for bio_alloc)
221 * @page:	page containing data
222 *
223 * Description:
224 *    Issue a write same request for the sectors in question.
225 */
226int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
227				sector_t nr_sects, gfp_t gfp_mask,
228				struct page *page)
229{
230	struct bio *bio = NULL;
231	struct blk_plug plug;
232	int ret;
233
234	blk_start_plug(&plug);
235	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
236			&bio);
237	if (ret == 0 && bio) {
238		ret = submit_bio_wait(bio);
239		bio_put(bio);
240	}
241	blk_finish_plug(&plug);
242	return ret;
243}
244EXPORT_SYMBOL(blkdev_issue_write_same);
245
246static int __blkdev_issue_write_zeroes(struct block_device *bdev,
247		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
248		struct bio **biop, unsigned flags)
249{
250	struct bio *bio = *biop;
251	unsigned int max_write_zeroes_sectors;
252	struct request_queue *q = bdev_get_queue(bdev);
253
254	if (!q)
255		return -ENXIO;
256
257	if (bdev_read_only(bdev))
258		return -EPERM;
259
260	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
261	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);
262
263	if (max_write_zeroes_sectors == 0)
264		return -EOPNOTSUPP;
265
266	while (nr_sects) {
267		bio = blk_next_bio(bio, 0, gfp_mask);
268		bio->bi_iter.bi_sector = sector;
269		bio_set_dev(bio, bdev);
270		bio->bi_opf = REQ_OP_WRITE_ZEROES;
271		if (flags & BLKDEV_ZERO_NOUNMAP)
272			bio->bi_opf |= REQ_NOUNMAP;
273
274		if (nr_sects > max_write_zeroes_sectors) {
275			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
276			nr_sects -= max_write_zeroes_sectors;
277			sector += max_write_zeroes_sectors;
278		} else {
279			bio->bi_iter.bi_size = nr_sects << 9;
280			nr_sects = 0;
281		}
282		cond_resched();
283	}
284
285	*biop = bio;
286	return 0;
287}
288
289/*
290 * Convert a number of 512B sectors to a number of pages.
291 * The result is limited to a number of pages that can fit into a BIO.
292 * Also make sure that the result is always at least 1 (page) for the cases
293 * where nr_sects is lower than the number of sectors in a page.
294 */
295static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
296{
297	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);
298
299	return min(pages, (sector_t)BIO_MAX_PAGES);
300}
301
302static int __blkdev_issue_zero_pages(struct block_device *bdev,
303		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
304		struct bio **biop)
305{
306	struct request_queue *q = bdev_get_queue(bdev);
307	struct bio *bio = *biop;
308	int bi_size = 0;
309	unsigned int sz;
310
311	if (!q)
312		return -ENXIO;
313
314	if (bdev_read_only(bdev))
315		return -EPERM;
316
317	while (nr_sects != 0) {
318		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
319				   gfp_mask);
320		bio->bi_iter.bi_sector = sector;
321		bio_set_dev(bio, bdev);
322		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
323
324		while (nr_sects != 0) {
325			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
326			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
327			nr_sects -= bi_size >> 9;
328			sector += bi_size >> 9;
329			if (bi_size < sz)
330				break;
331		}
332		cond_resched();
333	}
334
335	*biop = bio;
336	return 0;
337}
338
339/**
340 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
341 * @bdev:	blockdev to issue
342 * @sector:	start sector
343 * @nr_sects:	number of sectors to write
344 * @gfp_mask:	memory allocation flags (for bio_alloc)
345 * @biop:	pointer to anchor bio
346 * @flags:	controls detailed behavior
347 *
348 * Description:
349 *  Zero-fill a block range, either using hardware offload or by explicitly
350 *  writing zeroes to the device.
351 *
352 *  If a device is using logical block provisioning, the underlying space will
353 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
354 *
355 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
356 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
357 */
358int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
359		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
360		unsigned flags)
361{
362	int ret;
363	sector_t bs_mask;
364
365	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
366	if ((sector | nr_sects) & bs_mask)
367		return -EINVAL;
368
369	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
370			biop, flags);
371	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
372		return ret;
373
374	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
375					 biop);
376}
377EXPORT_SYMBOL(__blkdev_issue_zeroout);
378
379/**
380 * blkdev_issue_zeroout - zero-fill a block range
381 * @bdev:	blockdev to write
382 * @sector:	start sector
383 * @nr_sects:	number of sectors to write
384 * @gfp_mask:	memory allocation flags (for bio_alloc)
385 * @flags:	controls detailed behavior
386 *
387 * Description:
388 *  Zero-fill a block range, either using hardware offload or by explicitly
389 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
390 *  valid values for %flags.
391 */
392int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
393		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
394{
395	int ret = 0;
396	sector_t bs_mask;
397	struct bio *bio;
398	struct blk_plug plug;
399	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);
400
401	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
402	if ((sector | nr_sects) & bs_mask)
403		return -EINVAL;
404
405retry:
406	bio = NULL;
407	blk_start_plug(&plug);
408	if (try_write_zeroes) {
409		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
410						  gfp_mask, &bio, flags);
411	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
412		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
413						gfp_mask, &bio);
414	} else {
415		/* No zeroing offload support */
416		ret = -EOPNOTSUPP;
417	}
418	if (ret == 0 && bio) {
419		ret = submit_bio_wait(bio);
420		bio_put(bio);
421	}
422	blk_finish_plug(&plug);
423	if (ret && try_write_zeroes) {
424		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
425			try_write_zeroes = false;
426			goto retry;
427		}
428		if (!bdev_write_zeroes_sectors(bdev)) {
429			/*
430			 * Zeroing offload support was indicated, but the
431			 * device reported ILLEGAL REQUEST (for some devices
432			 * there is no non-destructive way to verify whether
433			 * WRITE ZEROES is actually supported).
434			 */
435			ret = -EOPNOTSUPP;
436		}
437	}
438
439	return ret;
440}
441EXPORT_SYMBOL(blkdev_issue_zeroout);
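Finally, a hypothetical caller sketch for the %flags behaviour documented for blkdev_issue_zeroout() above (the function and its arguments are illustrative, not from the source):

/* Hypothetical example: prefer a zeroing offload, then fall back explicitly. */
static int example_zero_range(struct block_device *bdev,
			      sector_t start, sector_t nr)
{
	int ret;

	/*
	 * BLKDEV_ZERO_NOFALLBACK: return -EOPNOTSUPP instead of writing
	 * zero pages when the device has no WRITE ZEROES style offload.
	 */
	ret = blkdev_issue_zeroout(bdev, start, nr, GFP_KERNEL,
				   BLKDEV_ZERO_NOFALLBACK);
	if (ret != -EOPNOTSUPP)
		return ret;

	/*
	 * No usable offload: zero the range explicitly.  flags == 0 permits
	 * the zero-page fallback and allows unmapping where supported.
	 */
	return blkdev_issue_zeroout(bdev, start, nr, GFP_KERNEL, 0);
}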