// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

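/*
 * Allocate a new bio with room for @nr_pages vecs.  If @bio is non-NULL it
 * is chained to the new bio and submitted, so repeated calls build and
 * issue a chain of bios one link at a time.
 */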
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

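/**
 * __blkdev_issue_discard - queue a discard without waiting for completion
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Build discard bios for the range and chain them onto the bio anchored
 *    at @biop.  The caller is responsible for submitting the returned chain
 *    (for example with submit_bio_wait()) and for putting the final bio.
 */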
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects = min_t(sector_t, nr_sects,
				bio_allowed_max_sectors(q));

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
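/*
 * Example (a sketch, not taken from an in-tree caller): a filesystem that
 * has just freed an extent might discard it like this, where "bdev",
 * "start" and "count" (both in 512-byte sectors) are assumed to be supplied
 * by the caller:
 *
 *	int err;
 *
 *	err = blkdev_issue_discard(bdev, start, count, GFP_NOFS, 0);
 *	if (err && err != -EOPNOTSUPP)
 *		pr_warn("discard %llu+%llu failed: %d\n",
 *			(unsigned long long)start,
 *			(unsigned long long)count, err);
 *
 * Passing BLKDEV_DISCARD_SECURE in @flags requests a secure erase instead,
 * which fails with -EOPNOTSUPP if the queue does not support it.
 */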

/**
 * __blkdev_issue_write_same - generate number of bios with same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all use the
 *    same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

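/*
 * Example (a sketch, not taken from an in-tree caller): replicate a single
 * logical block of data across a range, assuming "pattern_page" was filled
 * by the caller with at least one logical block of data:
 *
 *	int err;
 *
 *	err = blkdev_issue_write_same(bdev, start, count, GFP_KERNEL,
 *				      pattern_page);
 *	if (err == -EOPNOTSUPP)
 *		err = write_pattern_by_hand(bdev, start, count);
 *
 * write_pattern_by_hand() is a stand-in for whatever fallback the caller
 * uses when the device lacks WRITE SAME support.
 */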
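/*
 * Build REQ_OP_WRITE_ZEROES bios for the range and chain them onto *biop.
 * BLKDEV_ZERO_NOUNMAP in @flags asks the device to keep the blocks
 * provisioned; -EOPNOTSUPP is returned if the device does not advertise
 * write-zeroes support.
 */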
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}

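/*
 * Fallback when no zeroing offload is available: zero the range with
 * regular REQ_OP_WRITE bios whose pages all point at ZERO_PAGE(0).
 */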
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
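/*
 * Example (a sketch, not taken from an in-tree caller): a caller that wants
 * to batch the whole range behind one plug can chain the bios through the
 * anchor and submit once at the end; "bdev", "sector" and "nr_sects" are
 * assumed to come from the caller:
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int err;
 *
 *	blk_start_plug(&plug);
 *	err = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				     &bio, BLKDEV_ZERO_NOUNMAP);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 *
 * This mirrors what blkdev_issue_zeroout() below does, minus its retry
 * handling.
 */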

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
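/*
 * Example (a sketch, not taken from an in-tree caller): probe for a zeroing
 * offload without falling back to writing zero pages:
 *
 *	int err;
 *
 *	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOFALLBACK);
 *	if (err == -EOPNOTSUPP)
 *		err = zero_range_by_hand(bdev, sector, nr_sects);
 *
 * zero_range_by_hand() is a stand-in for whatever the caller prefers when
 * the device cannot zero the range itself.
 */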