1/*
2 * bio-integrity.c - bio data integrity extensions
3 *
4 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
5 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
19 * USA.
20 *
21 */
22
23#include <linux/blkdev.h>
24#include <linux/mempool.h>
25#include <linux/export.h>
26#include <linux/bio.h>
27#include <linux/workqueue.h>
28#include <linux/slab.h>
29
30#define BIP_INLINE_VECS 4
31
32static struct kmem_cache *bip_slab;
33static struct workqueue_struct *kintegrityd_wq;
34
/* Wait for all pending integrity verification work on kintegrityd to finish. */
void blk_flush_integrity(void)
{
	flush_workqueue(kintegrityd_wq);
}
39
40/**
41 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
42 * @bio: bio to attach integrity metadata to
43 * @gfp_mask: Memory allocation mask
44 * @nr_vecs: Number of integrity metadata scatter-gather elements
45 *
46 * Description: This function prepares a bio for attaching integrity
47 * metadata. nr_vecs specifies the maximum number of pages containing
48 * integrity metadata that can be attached.
49 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_payload *bip;
	struct bio_set *bs = bio->bi_pool;
	unsigned long idx = BIO_POOL_NONE;	/* BIO_POOL_NONE == no separate bvec slab used */
	unsigned inline_vecs;

	/*
	 * Without a bio_set (or one lacking an integrity pool) fall back to
	 * plain kmalloc and place all vectors inline after the payload.
	 */
	if (!bs || !bs->bio_integrity_pool) {
		bip = kmalloc(sizeof(struct bio_integrity_payload) +
			      sizeof(struct bio_vec) * nr_vecs, gfp_mask);
		inline_vecs = nr_vecs;
	} else {
		bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
		inline_vecs = BIP_INLINE_VECS;
	}

	if (unlikely(!bip))
		return ERR_PTR(-ENOMEM);

	memset(bip, 0, sizeof(*bip));

	/* Mempool payloads only carry BIP_INLINE_VECS; overflow to bvec slab */
	if (nr_vecs > inline_vecs) {
		bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
					  bs->bvec_integrity_pool);
		if (!bip->bip_vec)
			goto err;
		bip->bip_max_vcnt = bvec_nr_vecs(idx);
	} else {
		bip->bip_vec = bip->bip_inline_vecs;
		bip->bip_max_vcnt = inline_vecs;
	}

	/* Remember the bvec slab index so bio_integrity_free() can release it */
	bip->bip_slab = idx;
	bip->bip_bio = bio;
	bio->bi_integrity = bip;
	bio->bi_rw |= REQ_INTEGRITY;

	return bip;
err:
	/* err is only reachable from the mempool branch, so bs is valid here */
	mempool_free(bip, bs->bio_integrity_pool);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);
95
96/**
97 * bio_integrity_free - Free bio integrity payload
98 * @bio: bio containing bip to be freed
99 *
100 * Description: Used to free the integrity portion of a bio. Usually
101 * called from bio_free().
102 */
void bio_integrity_free(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_set *bs = bio->bi_pool;

	/*
	 * Payloads generated by the block layer (bio_integrity_prep) own
	 * their protection buffer; user-attached payloads do not.
	 */
	if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
		kfree(page_address(bip->bip_vec->bv_page) +
		      bip->bip_vec->bv_offset);

	/* Return bip (and a separately allocated bvec array) to its origin */
	if (bs && bs->bio_integrity_pool) {
		if (bip->bip_slab != BIO_POOL_NONE)
			bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
				  bip->bip_slab);

		mempool_free(bip, bs->bio_integrity_pool);
	} else {
		kfree(bip);
	}

	bio->bi_integrity = NULL;
}
EXPORT_SYMBOL(bio_integrity_free);
125
126/**
127 * bio_integrity_add_page - Attach integrity metadata
128 * @bio: bio to update
129 * @page: page containing integrity metadata
130 * @len: number of bytes of integrity metadata in page
131 * @offset: start offset within page
132 *
133 * Description: Attach a page containing integrity metadata to bio.
134 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_vec *iv;

	/* Integrity vector already at capacity: refuse the page */
	if (bip->bip_vcnt >= bip->bip_max_vcnt) {
		printk(KERN_ERR "%s: bip_vec full\n", __func__);
		return 0;
	}

	iv = bip->bip_vec + bip->bip_vcnt;

	/* Refuse pages that would create an SG gap the device cannot handle */
	if (bip->bip_vcnt &&
	    bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
			     &bip->bip_vec[bip->bip_vcnt - 1], offset))
		return 0;

	iv->bv_page = page;
	iv->bv_len = len;
	iv->bv_offset = offset;
	bip->bip_vcnt++;

	/* Returns len on success, 0 on failure (callers check for 0) */
	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);
161
162/**
163 * bio_integrity_enabled - Check whether integrity can be passed
164 * @bio: bio to check
165 *
166 * Description: Determines whether bio_integrity_prep() can be called
167 * on this bio or not. bio data direction and target device must be
 * set prior to calling. The function honors the write_generate and
169 * read_verify flags in sysfs.
170 */
bool bio_integrity_enabled(struct bio *bio)
{
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);

	/* Only regular read/write bios carry protection data */
	if (!bio_is_rw(bio))
		return false;

	/* Already protected? */
	if (bio_integrity(bio))
		return false;

	/* Target device has no integrity profile */
	if (bi == NULL)
		return false;

	/* READs need a verify function and the sysfs read_verify flag set */
	if (bio_data_dir(bio) == READ && bi->profile->verify_fn != NULL &&
	    (bi->flags & BLK_INTEGRITY_VERIFY))
		return true;

	/* WRITEs need a generate function and the write_generate flag set */
	if (bio_data_dir(bio) == WRITE && bi->profile->generate_fn != NULL &&
	    (bi->flags & BLK_INTEGRITY_GENERATE))
		return true;

	return false;
}
EXPORT_SYMBOL(bio_integrity_enabled);
196
197/**
198 * bio_integrity_intervals - Return number of integrity intervals for a bio
199 * @bi: blk_integrity profile for device
200 * @sectors: Size of the bio in 512-byte sectors
201 *
202 * Description: The block layer calculates everything in 512 byte
203 * sectors but integrity metadata is done in terms of the data integrity
204 * interval size of the storage device. Convert the block layer sectors
205 * to the appropriate number of integrity intervals.
206 */
207static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
208 unsigned int sectors)
209{
210 return sectors >> (bi->interval_exp - 9);
211}
212
213static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
214 unsigned int sectors)
215{
216 return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
217}
218
219/**
220 * bio_integrity_process - Process integrity metadata for a bio
221 * @bio: bio to generate/verify integrity metadata for
222 * @proc_fn: Pointer to the relevant processing function
223 */
224static int bio_integrity_process(struct bio *bio,
225 integrity_processing_fn *proc_fn)
226{
227 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
228 struct blk_integrity_iter iter;
229 struct bvec_iter bviter;
230 struct bio_vec bv;
231 struct bio_integrity_payload *bip = bio_integrity(bio);
232 unsigned int ret = 0;
233 void *prot_buf = page_address(bip->bip_vec->bv_page) +
234 bip->bip_vec->bv_offset;
235
236 iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
237 iter.interval = 1 << bi->interval_exp;
238 iter.seed = bip_get_seed(bip);
239 iter.prot_buf = prot_buf;
240
241 bio_for_each_segment(bv, bio, bviter) {
242 void *kaddr = kmap_atomic(bv.bv_page);
243
244 iter.data_buf = kaddr + bv.bv_offset;
245 iter.data_size = bv.bv_len;
246
247 ret = proc_fn(&iter);
248 if (ret) {
249 kunmap_atomic(kaddr);
250 return ret;
251 }
252
253 kunmap_atomic(kaddr);
254 }
255 return ret;
256}
257
258/**
259 * bio_integrity_prep - Prepare bio for integrity I/O
260 * @bio: bio to prepare
261 *
262 * Description: Allocates a buffer for integrity metadata, maps the
263 * pages and attaches them to a bio. The bio must have data
 * direction, target device and start sector set prior to calling. In
265 * the WRITE case, integrity metadata will be generated using the
266 * block device's integrity function. In the READ case, the buffer
267 * will be prepared for DMA and a suitable end_io handler set up.
268 */
269int bio_integrity_prep(struct bio *bio)
270{
271 struct bio_integrity_payload *bip;
272 struct blk_integrity *bi;
273 struct request_queue *q;
274 void *buf;
275 unsigned long start, end;
276 unsigned int len, nr_pages;
277 unsigned int bytes, offset, i;
278 unsigned int intervals;
279
280 bi = bdev_get_integrity(bio->bi_bdev);
281 q = bdev_get_queue(bio->bi_bdev);
282 BUG_ON(bi == NULL);
283 BUG_ON(bio_integrity(bio));
284
285 intervals = bio_integrity_intervals(bi, bio_sectors(bio));
286
287 /* Allocate kernel buffer for protection data */
288 len = intervals * bi->tuple_size;
289 buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
290 if (unlikely(buf == NULL)) {
291 printk(KERN_ERR "could not allocate integrity buffer\n");
292 return -ENOMEM;
293 }
294
295 end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
296 start = ((unsigned long) buf) >> PAGE_SHIFT;
297 nr_pages = end - start;
298
299 /* Allocate bio integrity payload and integrity vectors */
300 bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
301 if (IS_ERR(bip)) {
302 printk(KERN_ERR "could not allocate data integrity bioset\n");
303 kfree(buf);
304 return PTR_ERR(bip);
305 }
306
307 bip->bip_flags |= BIP_BLOCK_INTEGRITY;
308 bip->bip_iter.bi_size = len;
309 bip_set_seed(bip, bio->bi_iter.bi_sector);
310
311 if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)
312 bip->bip_flags |= BIP_IP_CHECKSUM;
313
314 /* Map it */
315 offset = offset_in_page(buf);
316 for (i = 0 ; i < nr_pages ; i++) {
317 int ret;
318 bytes = PAGE_SIZE - offset;
319
320 if (len <= 0)
321 break;
322
323 if (bytes > len)
324 bytes = len;
325
326 ret = bio_integrity_add_page(bio, virt_to_page(buf),
327 bytes, offset);
328
329 if (ret == 0)
330 return 0;
331
332 if (ret < bytes)
333 break;
334
335 buf += bytes;
336 len -= bytes;
337 offset = 0;
338 }
339
340 /* Install custom I/O completion handler if read verify is enabled */
341 if (bio_data_dir(bio) == READ) {
342 bip->bip_end_io = bio->bi_end_io;
343 bio->bi_end_io = bio_integrity_endio;
344 }
345
346 /* Auto-generate integrity metadata if this is a write */
347 if (bio_data_dir(bio) == WRITE)
348 bio_integrity_process(bio, bi->profile->generate_fn);
349
350 return 0;
351}
352EXPORT_SYMBOL(bio_integrity_prep);
353
354/**
355 * bio_integrity_verify_fn - Integrity I/O completion worker
356 * @work: Work struct stored in bio to be verified
357 *
358 * Description: This workqueue function is called to complete a READ
359 * request. The function verifies the transferred integrity metadata
360 * and then calls the original bio end_io function.
361 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);

	/* Verify result becomes the bio's completion status */
	bio->bi_error = bio_integrity_process(bio, bi->profile->verify_fn);

	/* Restore original bio completion handler */
	bio->bi_end_io = bip->bip_end_io;
	bio_endio(bio);
}
375
376/**
377 * bio_integrity_endio - Integrity I/O completion function
378 * @bio: Protected bio
379 * @error: Pointer to errno
380 *
381 * Description: Completion for integrity I/O
382 *
383 * Normally I/O completion is done in interrupt context. However,
384 * verifying I/O integrity is a time-consuming task which must be run
385 * in process context. This function postpones completion
386 * accordingly.
387 */
void bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	BUG_ON(bip->bip_bio != bio);

	/* In case of an I/O error there is no point in verifying the
	 * integrity metadata.  Restore original bio end_io handler
	 * and run it.
	 */
	if (bio->bi_error) {
		bio->bi_end_io = bip->bip_end_io;
		bio_endio(bio);

		return;
	}

	/* Verification is CPU-heavy: defer it to process context */
	INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
	queue_work(kintegrityd_wq, &bip->bip_work);
}
EXPORT_SYMBOL(bio_integrity_endio);
409
410/**
411 * bio_integrity_advance - Advance integrity vector
412 * @bio: bio whose integrity vector to update
413 * @bytes_done: number of data bytes that have been completed
414 *
415 * Description: This function calculates how many integrity bytes the
416 * number of completed data bytes correspond to and advances the
417 * integrity vector accordingly.
418 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
	/* Translate completed data bytes (512-byte sectors) to metadata bytes */
	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
EXPORT_SYMBOL(bio_integrity_advance);
428
429/**
430 * bio_integrity_trim - Trim integrity vector
431 * @bio: bio whose integrity vector to update
432 * @offset: offset to first data sector
433 * @sectors: number of data sectors
434 *
435 * Description: Used to trim the integrity vector in a cloned bio.
436 * The ivec will be advanced corresponding to 'offset' data sectors
437 * and the length will be truncated corresponding to 'len' data
438 * sectors.
439 */
void bio_integrity_trim(struct bio *bio, unsigned int offset,
			unsigned int sectors)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);

	/* Skip metadata for the first 'offset' sectors, then cap the length */
	bio_integrity_advance(bio, offset << 9);
	bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
}
EXPORT_SYMBOL(bio_integrity_trim);
450
451/**
452 * bio_integrity_clone - Callback for cloning bios with integrity metadata
453 * @bio: New bio
454 * @bio_src: Original bio
455 * @gfp_mask: Memory allocation mask
456 *
457 * Description: Called to allocate a bip when cloning a bio
458 */
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
			gfp_t gfp_mask)
{
	struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
	struct bio_integrity_payload *bip;

	BUG_ON(bip_src == NULL);

	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	/* Copy the vector entries; pages themselves are shared, not copied */
	memcpy(bip->bip_vec, bip_src->bip_vec,
	       bip_src->bip_vcnt * sizeof(struct bio_vec));

	bip->bip_vcnt = bip_src->bip_vcnt;
	bip->bip_iter = bip_src->bip_iter;

	return 0;
}
EXPORT_SYMBOL(bio_integrity_clone);
480
/* Create the integrity payload and bvec mempools for a bio_set.
 * Returns 0 on success (or if already created), -1 on allocation failure. */
int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	/* Idempotent: nothing to do if the pool already exists */
	if (bs->bio_integrity_pool)
		return 0;

	bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab);
	if (!bs->bio_integrity_pool)
		return -1;

	bs->bvec_integrity_pool = biovec_create_pool(pool_size);
	if (!bs->bvec_integrity_pool) {
		/* Undo the first pool on partial failure */
		mempool_destroy(bs->bio_integrity_pool);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(bioset_integrity_create);
499
/* Tear down the integrity mempools of a bio_set, if they were created. */
void bioset_integrity_free(struct bio_set *bs)
{
	if (bs->bio_integrity_pool)
		mempool_destroy(bs->bio_integrity_pool);

	if (bs->bvec_integrity_pool)
		mempool_destroy(bs->bvec_integrity_pool);
}
EXPORT_SYMBOL(bioset_integrity_free);
509
/* Boot-time setup: create the verification workqueue and the bip slab. */
void __init bio_integrity_init(void)
{
	/*
	 * kintegrityd won't block much but may burn a lot of CPU cycles.
	 * Make it highpri CPU intensive wq with max concurrency of 1.
	 */
	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
					WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");

	/* Slab objects carry BIP_INLINE_VECS inline vectors after the payload */
	bip_slab = kmem_cache_create("bio_integrity_payload",
				     sizeof(struct bio_integrity_payload) +
				     sizeof(struct bio_vec) * BIP_INLINE_VECS,
				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * bio-integrity.c - bio data integrity extensions
4 *
5 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
6 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
7 */
8
9#include <linux/blk-integrity.h>
10#include <linux/mempool.h>
11#include <linux/export.h>
12#include <linux/bio.h>
13#include <linux/workqueue.h>
14#include <linux/slab.h>
15#include "blk.h"
16
17static struct kmem_cache *bip_slab;
18static struct workqueue_struct *kintegrityd_wq;
19
/* Wait for all pending integrity verification work on kintegrityd to finish. */
void blk_flush_integrity(void)
{
	flush_workqueue(kintegrityd_wq);
}
24
25/**
26 * bio_integrity_free - Free bio integrity payload
27 * @bio: bio containing bip to be freed
28 *
29 * Description: Free the integrity portion of a bio.
30 */
void bio_integrity_free(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_set *bs = bio->bi_pool;

	/* Return bip (and a separately allocated bvec array) to its origin */
	if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
		if (bip->bip_vec)
			bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
				  bip->bip_max_vcnt);
		mempool_free(bip, &bs->bio_integrity_pool);
	} else {
		kfree(bip);
	}
	/* Detach from the bio and drop the integrity request flag */
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
}
47
48/**
49 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
50 * @bio: bio to attach integrity metadata to
51 * @gfp_mask: Memory allocation mask
52 * @nr_vecs: Number of integrity metadata scatter-gather elements
53 *
54 * Description: This function prepares a bio for attaching integrity
55 * metadata. nr_vecs specifies the maximum number of pages containing
56 * integrity metadata that can be attached.
57 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_payload *bip;
	struct bio_set *bs = bio->bi_pool;
	unsigned inline_vecs;

	/* Inline encryption and an integrity payload are mutually exclusive */
	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * Without a bio_set (or one lacking an initialized integrity pool)
	 * fall back to kmalloc with all vectors inline after the payload.
	 */
	if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
		bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
		inline_vecs = nr_vecs;
	} else {
		bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!bip))
		return ERR_PTR(-ENOMEM);

	memset(bip, 0, sizeof(*bip));

	/* always report as many vecs as asked explicitly, not inline vecs */
	bip->bip_max_vcnt = nr_vecs;
	if (nr_vecs > inline_vecs) {
		/* bvec_alloc may round bip_max_vcnt up to the slab size */
		bip->bip_vec = bvec_alloc(&bs->bvec_integrity_pool,
					  &bip->bip_max_vcnt, gfp_mask);
		if (!bip->bip_vec)
			goto err;
	} else if (nr_vecs) {
		bip->bip_vec = bip->bip_inline_vecs;
	}

	bip->bip_bio = bio;
	bio->bi_integrity = bip;
	bio->bi_opf |= REQ_INTEGRITY;

	return bip;
err:
	/* Free through the same allocator the payload came from */
	if (bs && mempool_initialized(&bs->bio_integrity_pool))
		mempool_free(bip, &bs->bio_integrity_pool);
	else
		kfree(bip);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);
106
107static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
108 bool dirty)
109{
110 int i;
111
112 for (i = 0; i < nr_vecs; i++) {
113 if (dirty && !PageCompound(bv[i].bv_page))
114 set_page_dirty_lock(bv[i].bv_page);
115 unpin_user_page(bv[i].bv_page);
116 }
117}
118
/* Copy bounce-buffer contents back to the original user pages after a READ,
 * then unpin those pages.  Layout: bip_vec[0] is the bounce buffer and
 * bip_vec[1..] holds the preserved original user bvecs (see
 * bio_integrity_copy_user). */
static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
{
	unsigned short orig_nr_vecs = bip->bip_max_vcnt - 1;
	struct bio_vec *orig_bvecs = &bip->bip_vec[1];
	struct bio_vec *bounce_bvec = &bip->bip_vec[0];
	size_t bytes = bounce_bvec->bv_len;
	struct iov_iter orig_iter;
	int ret;

	iov_iter_bvec(&orig_iter, ITER_DEST, orig_bvecs, orig_nr_vecs, bytes);
	ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
	/* A short copy here would mean corrupted bookkeeping, not user error */
	WARN_ON_ONCE(ret != bytes);

	/* dirty=true: user pages now hold data read from the device */
	bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
}
134
135/**
136 * bio_integrity_unmap_user - Unmap user integrity payload
137 * @bio: bio containing bip to be unmapped
138 *
139 * Unmap the user mapped integrity portion of a bio.
140 */
void bio_integrity_unmap_user(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	/* Bounce-buffered payloads: copy back (READ), then free the bounce */
	if (bip->bip_flags & BIP_COPY_USER) {
		if (bio_data_dir(bio) == READ)
			bio_integrity_uncopy_user(bip);
		kfree(bvec_virt(bip->bip_vec));
		return;
	}

	/* Directly pinned user pages: dirty them on READ and unpin */
	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
			bio_data_dir(bio) == READ);
}
155
156/**
157 * bio_integrity_add_page - Attach integrity metadata
158 * @bio: bio to update
159 * @page: page containing integrity metadata
160 * @len: number of bytes of integrity metadata in page
161 * @offset: start offset within page
162 *
163 * Description: Attach a page containing integrity metadata to bio.
164 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip->bip_vcnt > 0) {
		struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
		bool same_page = false;

		/* Try to merge into the previous vector entry first */
		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
					   &same_page)) {
			bip->bip_iter.bi_size += len;
			return len;
		}

		/* Bounded by both the payload and the queue's segment limit */
		if (bip->bip_vcnt >=
		    min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
			return 0;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(&q->limits, bv, offset))
			return 0;
	}

	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
	bip->bip_vcnt++;
	bip->bip_iter.bi_size += len;

	/* Returns len on success, 0 on failure (callers check for 0) */
	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);
200
/* Attach integrity metadata via a kernel bounce buffer instead of mapping
 * the user pages directly (used when alignment or segment limits forbid a
 * direct mapping).  On WRITE the user data is copied in immediately; on
 * READ the original bvecs are stashed at bip_vec[1..] so completion can
 * copy back (see bio_integrity_uncopy_user).  Takes over the page pins on
 * success; on failure the caller still owns them. */
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len,
				   unsigned int direction)
{
	bool write = direction == ITER_SOURCE;
	struct bio_integrity_payload *bip;
	struct iov_iter iter;
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (write) {
		/* Copy user metadata into the bounce buffer up front */
		iov_iter_bvec(&iter, direction, bvec, nr_vecs, len);
		if (!copy_from_iter_full(buf, len, &iter)) {
			ret = -EFAULT;
			goto free_buf;
		}

		bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	} else {
		memset(buf, 0, len);

		/*
		 * We need to preserve the original bvec and the number of vecs
		 * in it for completion handling
		 */
		bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs + 1);
	}

	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto free_buf;
	}

	if (write)
		/* WRITE: data already copied, pins no longer needed */
		bio_integrity_unpin_bvec(bvec, nr_vecs, false);
	else
		/* READ: keep pinned originals after the bounce vec at [0] */
		memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));

	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
				     offset_in_page(buf));
	if (ret != len) {
		ret = -ENOMEM;
		goto free_bip;
	}

	bip->bip_flags |= BIP_COPY_USER;
	bip->bip_vcnt = nr_vecs;
	return 0;
free_bip:
	bio_integrity_free(bio);
free_buf:
	kfree(buf);
	return ret;
}
259
/* Attach pinned user pages directly as the integrity payload (no bounce).
 * The payload takes ownership of the pins; they are released in
 * bio_integrity_unmap_user(). */
static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
	bip->bip_iter.bi_size = len;
	bip->bip_vcnt = nr_vecs;
	return 0;
}
274
/* Coalesce a pinned page array into bvecs, merging physically contiguous
 * pages that belong to the same folio.  Pins on merged tail pages are
 * dropped here (one pin per bvec remains).  Returns the bvec count. */
static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
				    int nr_vecs, ssize_t bytes, ssize_t offset)
{
	unsigned int nr_bvecs = 0;
	int i, j;

	for (i = 0; i < nr_vecs; i = j) {
		/* First page of a run may start at a non-zero offset */
		size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
		struct folio *folio = page_folio(pages[i]);

		bytes -= size;
		for (j = i + 1; j < nr_vecs; j++) {
			size_t next = min_t(size_t, PAGE_SIZE, bytes);

			/* Only merge adjacent pages of the same folio */
			if (page_folio(pages[j]) != folio ||
			    pages[j] != pages[j - 1] + 1)
				break;
			unpin_user_page(pages[j]);
			size += next;
			bytes -= next;
		}

		bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
		offset = 0;
		nr_bvecs++;
	}

	return nr_bvecs;
}
304
/* Map a user-space buffer as the bio's integrity payload.  Pins the user
 * pages and attaches them directly when alignment and segment limits allow;
 * otherwise falls back to a kernel bounce buffer.  Returns 0 or -errno. */
int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
	struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
	struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
	unsigned int direction, nr_bvecs;
	struct iov_iter iter;
	int ret, nr_vecs;
	size_t offset;
	bool copy;

	if (bio_integrity(bio))
		return -EINVAL;
	if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
		return -E2BIG;

	if (bio_data_dir(bio) == READ)
		direction = ITER_DEST;
	else
		direction = ITER_SOURCE;

	iov_iter_ubuf(&iter, direction, ubuf, bytes);
	/* Probe one page past the limit so overflow is detectable */
	nr_vecs = iov_iter_npages(&iter, BIO_MAX_VECS + 1);
	if (nr_vecs > BIO_MAX_VECS)
		return -E2BIG;
	if (nr_vecs > UIO_FASTIOV) {
		bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL);
		if (!bvec)
			return -ENOMEM;
		/* NULL asks iov_iter_extract_pages to allocate the array */
		pages = NULL;
	}

	/* Misaligned buffers must go through the bounce-copy path */
	copy = !iov_iter_is_aligned(&iter, align, align);
	ret = iov_iter_extract_pages(&iter, &pages, bytes, nr_vecs, 0, &offset);
	if (unlikely(ret < 0))
		goto free_bvec;

	nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset);
	if (pages != stack_pages)
		kvfree(pages);
	/* Too many segments for the device also forces the copy path */
	if (nr_bvecs > queue_max_integrity_segments(q))
		copy = true;

	if (copy)
		ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
					      direction);
	else
		ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes);
	if (ret)
		goto release_pages;
	if (bvec != stack_vec)
		kfree(bvec);

	return 0;

release_pages:
	bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
free_bvec:
	if (bvec != stack_vec)
		kfree(bvec);
	return ret;
}
368
369/**
370 * bio_integrity_prep - Prepare bio for integrity I/O
371 * @bio: bio to prepare
372 *
373 * Description: Checks if the bio already has an integrity payload attached.
374 * If it does, the payload has been generated by another kernel subsystem,
375 * and we just pass it through. Otherwise allocates integrity payload.
 * The bio must have data direction, target device and start sector set prior
377 * to calling. In the WRITE case, integrity metadata will be generated using
378 * the block device's integrity function. In the READ case, the buffer
379 * will be prepared for DMA and a suitable end_io handler set up.
380 */
bool bio_integrity_prep(struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned int len;
	void *buf;
	gfp_t gfp = GFP_NOIO;

	/* true means "nothing to do / pass through", not necessarily success */
	if (!bi)
		return true;

	if (!bio_sectors(bio))
		return true;

	/* Already protected? */
	if (bio_integrity(bio))
		return true;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		if (bi->flags & BLK_INTEGRITY_NOVERIFY)
			return true;
		break;
	case REQ_OP_WRITE:
		if (bi->flags & BLK_INTEGRITY_NOGENERATE)
			return true;

		/*
		 * Zero the memory allocated to not leak uninitialized kernel
		 * memory to disk for non-integrity metadata where nothing else
		 * initializes the memory.
		 */
		if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
			gfp |= __GFP_ZERO;
		break;
	default:
		return true;
	}

	/* Allocate kernel buffer for protection data */
	len = bio_integrity_bytes(bi, bio_sectors(bio));
	buf = kmalloc(len, gfp);
	if (unlikely(buf == NULL)) {
		goto err_end_io;
	}

	/* One vec is enough: buf is virtually contiguous kernel memory */
	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip)) {
		kfree(buf);
		goto err_end_io;
	}

	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
	bip_set_seed(bip, bio->bi_iter.bi_sector);

	if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
		bip->bip_flags |= BIP_IP_CHECKSUM;

	/* A short add means the payload could not be attached in full */
	if (bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf)) < len) {
		printk(KERN_ERR "could not attach integrity payload\n");
		goto err_end_io;
	}

	/* Auto-generate integrity metadata if this is a write */
	if (bio_data_dir(bio) == WRITE)
		blk_integrity_generate(bio);
	else
		/* Snapshot the iter so verification can replay the segments */
		bip->bio_iter = bio->bi_iter;
	return true;

err_end_io:
	/* Fail the bio here; false tells the caller not to submit it */
	bio->bi_status = BLK_STS_RESOURCE;
	bio_endio(bio);
	return false;
}
EXPORT_SYMBOL(bio_integrity_prep);
458
459/**
460 * bio_integrity_verify_fn - Integrity I/O completion worker
461 * @work: Work struct stored in bio to be verified
462 *
463 * Description: This workqueue function is called to complete a READ
464 * request. The function verifies the transferred integrity metadata
465 * and then calls the original bio end_io function.
466 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;

	/* Sets bio->bi_status on mismatch (see blk_integrity_verify) */
	blk_integrity_verify(bio);

	/* Free the block-layer-owned protection buffer, then the payload */
	kfree(bvec_virt(bip->bip_vec));
	bio_integrity_free(bio);
	bio_endio(bio);
}
479
480/**
481 * __bio_integrity_endio - Integrity I/O completion function
482 * @bio: Protected bio
483 *
484 * Description: Completion for integrity I/O
485 *
486 * Normally I/O completion is done in interrupt context. However,
487 * verifying I/O integrity is a time-consuming task which must be run
488 * in process context. This function postpones completion
489 * accordingly.
490 */
bool __bio_integrity_endio(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	/*
	 * Successful READs with a checksum profile need verification, which
	 * is CPU-heavy and must run in process context: defer to kintegrityd
	 * and return false so bio_endio() does not complete the bio yet.
	 */
	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {
		INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
		queue_work(kintegrityd_wq, &bip->bip_work);
		return false;
	}

	/* No verification needed: release buffer and payload immediately */
	kfree(bvec_virt(bip->bip_vec));
	bio_integrity_free(bio);
	return true;
}
506
507/**
508 * bio_integrity_advance - Advance integrity vector
509 * @bio: bio whose integrity vector to update
510 * @bytes_done: number of data bytes that have been completed
511 *
512 * Description: This function calculates how many integrity bytes the
513 * number of completed data bytes correspond to and advances the
514 * integrity vector accordingly.
515 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	/* Translate completed data bytes (512-byte sectors) to metadata bytes */
	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	/* Keep the seed sector in sync with the advanced data position */
	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
525
526/**
527 * bio_integrity_trim - Trim integrity vector
528 * @bio: bio whose integrity vector to update
529 *
530 * Description: Used to trim the integrity vector in a cloned bio.
531 */
void bio_integrity_trim(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	/* Cap metadata length to match the bio's (possibly split) data size */
	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}
EXPORT_SYMBOL(bio_integrity_trim);
540
541/**
542 * bio_integrity_clone - Callback for cloning bios with integrity metadata
543 * @bio: New bio
544 * @bio_src: Original bio
545 * @gfp_mask: Memory allocation mask
546 *
547 * Description: Called to allocate a bip when cloning a bio
548 */
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
			gfp_t gfp_mask)
{
	struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
	struct bio_integrity_payload *bip;

	BUG_ON(bip_src == NULL);

	/* nr_vecs == 0: the clone shares the source's vector, owning nothing */
	bip = bio_integrity_alloc(bio, gfp_mask, 0);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	bip->bip_vec = bip_src->bip_vec;
	bip->bip_iter = bip_src->bip_iter;
	/* Clear BIP_BLOCK_INTEGRITY so the clone never frees the shared buffer */
	bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;

	return 0;
}
567
/* Create the integrity payload and bvec mempools for a bio_set.
 * Returns 0 on success (or if already created), -1 on allocation failure. */
int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	/* Idempotent: nothing to do if the pool is already initialized */
	if (mempool_initialized(&bs->bio_integrity_pool))
		return 0;

	if (mempool_init_slab_pool(&bs->bio_integrity_pool,
				   pool_size, bip_slab))
		return -1;

	if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {
		/* Undo the first pool on partial failure */
		mempool_exit(&bs->bio_integrity_pool);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(bioset_integrity_create);
585
/* Tear down the integrity mempools; mempool_exit() is safe on
 * uninitialized pools. */
void bioset_integrity_free(struct bio_set *bs)
{
	mempool_exit(&bs->bio_integrity_pool);
	mempool_exit(&bs->bvec_integrity_pool);
}
591
/* Boot-time setup: create the verification workqueue and the bip slab. */
void __init bio_integrity_init(void)
{
	/*
	 * kintegrityd won't block much but may burn a lot of CPU cycles.
	 * Make it highpri CPU intensive wq with max concurrency of 1.
	 */
	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
					WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");

	/* Slab objects carry BIO_INLINE_VECS inline vectors after the payload */
	bip_slab = kmem_cache_create("bio_integrity_payload",
				     sizeof(struct bio_integrity_payload) +
				     sizeof(struct bio_vec) * BIO_INLINE_VECS,
				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}