// SPDX-License-Identifier: GPL-2.0
/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/export.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include "blk.h"

static struct kmem_cache *bip_slab;
static struct workqueue_struct *kintegrityd_wq;

void blk_flush_integrity(void)
{
        flush_workqueue(kintegrityd_wq);
}

/**
 * bio_integrity_free - Free bio integrity payload
 * @bio: bio containing bip to be freed
 *
 * Description: Free the integrity portion of a bio.
 */
void bio_integrity_free(struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct bio_set *bs = bio->bi_pool;

        if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
                if (bip->bip_vec)
                        bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
                                  bip->bip_max_vcnt);
                mempool_free(bip, &bs->bio_integrity_pool);
        } else {
                kfree(bip);
        }
        bio->bi_integrity = NULL;
        bio->bi_opf &= ~REQ_INTEGRITY;
}

/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio: bio to attach integrity metadata to
 * @gfp_mask: Memory allocation mask
 * @nr_vecs: Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata. nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
                                                  gfp_t gfp_mask,
                                                  unsigned int nr_vecs)
{
        struct bio_integrity_payload *bip;
        struct bio_set *bs = bio->bi_pool;
        unsigned inline_vecs;

        if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
                return ERR_PTR(-EOPNOTSUPP);

        if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
                bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
                inline_vecs = nr_vecs;
        } else {
                bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
                inline_vecs = BIO_INLINE_VECS;
        }

        if (unlikely(!bip))
                return ERR_PTR(-ENOMEM);

        memset(bip, 0, sizeof(*bip));

        /* always report as many vecs as asked explicitly, not inline vecs */
        bip->bip_max_vcnt = nr_vecs;
        if (nr_vecs > inline_vecs) {
                bip->bip_vec = bvec_alloc(&bs->bvec_integrity_pool,
                                          &bip->bip_max_vcnt, gfp_mask);
                if (!bip->bip_vec)
                        goto err;
        } else if (nr_vecs) {
                bip->bip_vec = bip->bip_inline_vecs;
        }

        bip->bip_bio = bio;
        bio->bi_integrity = bip;
        bio->bi_opf |= REQ_INTEGRITY;

        return bip;
err:
        if (bs && mempool_initialized(&bs->bio_integrity_pool))
                mempool_free(bip, &bs->bio_integrity_pool);
        else
                kfree(bip);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);

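/*
 * Illustrative usage sketch, not part of this file: a caller that
 * generates its own protection information would typically allocate
 * the payload first and check the ERR_PTR-encoded return value:
 *
 *      struct bio_integrity_payload *bip;
 *
 *      bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
 *      if (IS_ERR(bip))
 *              return PTR_ERR(bip);
 */
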
static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
                                     bool dirty)
{
        int i;

        for (i = 0; i < nr_vecs; i++) {
                if (dirty && !PageCompound(bv[i].bv_page))
                        set_page_dirty_lock(bv[i].bv_page);
                unpin_user_page(bv[i].bv_page);
        }
}

static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
{
        unsigned short orig_nr_vecs = bip->bip_max_vcnt - 1;
        struct bio_vec *orig_bvecs = &bip->bip_vec[1];
        struct bio_vec *bounce_bvec = &bip->bip_vec[0];
        size_t bytes = bounce_bvec->bv_len;
        struct iov_iter orig_iter;
        int ret;

        iov_iter_bvec(&orig_iter, ITER_DEST, orig_bvecs, orig_nr_vecs, bytes);
        ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
        WARN_ON_ONCE(ret != bytes);

        bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
}

/**
 * bio_integrity_unmap_user - Unmap user integrity payload
 * @bio: bio containing bip to be unmapped
 *
 * Unmap the user mapped integrity portion of a bio.
 */
void bio_integrity_unmap_user(struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);

        if (bip->bip_flags & BIP_COPY_USER) {
                if (bio_data_dir(bio) == READ)
                        bio_integrity_uncopy_user(bip);
                kfree(bvec_virt(bip->bip_vec));
                return;
        }

        bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
                                 bio_data_dir(bio) == READ);
}

/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio: bio to update
 * @page: page containing integrity metadata
 * @len: number of bytes of integrity metadata in page
 * @offset: start offset within page
 *
 * Description: Attach a page containing integrity metadata to bio.
 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
                           unsigned int len, unsigned int offset)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        struct bio_integrity_payload *bip = bio_integrity(bio);

        if (bip->bip_vcnt > 0) {
                struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
                bool same_page = false;

                if (bvec_try_merge_hw_page(q, bv, page, len, offset,
                                           &same_page)) {
                        bip->bip_iter.bi_size += len;
                        return len;
                }

                if (bip->bip_vcnt >=
                    min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
                        return 0;

                /*
                 * If the queue doesn't support SG gaps and adding this segment
                 * would create a gap, disallow it.
                 */
                if (bvec_gap_to_prev(&q->limits, bv, offset))
                        return 0;
        }

        bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
        bip->bip_vcnt++;
        bip->bip_iter.bi_size += len;

        return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);

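/*
 * Illustrative sketch of a hypothetical caller, not part of this file:
 * pages holding protection data are attached one at a time, and a
 * return value shorter than @len means the page could not be added:
 *
 *      if (bio_integrity_add_page(bio, virt_to_page(buf), len,
 *                                 offset_in_page(buf)) < len)
 *              return -ENOMEM;
 */
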
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
                                   int nr_vecs, unsigned int len,
                                   unsigned int direction)
{
        bool write = direction == ITER_SOURCE;
        struct bio_integrity_payload *bip;
        struct iov_iter iter;
        void *buf;
        int ret;

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (write) {
                iov_iter_bvec(&iter, direction, bvec, nr_vecs, len);
                if (!copy_from_iter_full(buf, len, &iter)) {
                        ret = -EFAULT;
                        goto free_buf;
                }

                bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
        } else {
                memset(buf, 0, len);

                /*
                 * We need to preserve the original bvec and the number of vecs
                 * in it for completion handling
                 */
                bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs + 1);
        }

        if (IS_ERR(bip)) {
                ret = PTR_ERR(bip);
                goto free_buf;
        }

        if (write)
                bio_integrity_unpin_bvec(bvec, nr_vecs, false);
        else
                memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));

        ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
                                     offset_in_page(buf));
        if (ret != len) {
                ret = -ENOMEM;
                goto free_bip;
        }

        bip->bip_flags |= BIP_COPY_USER;
        bip->bip_vcnt = nr_vecs;
        return 0;
free_bip:
        bio_integrity_free(bio);
free_buf:
        kfree(buf);
        return ret;
}

static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
                                   int nr_vecs, unsigned int len)
{
        struct bio_integrity_payload *bip;

        bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs);
        if (IS_ERR(bip))
                return PTR_ERR(bip);

        memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
        bip->bip_iter.bi_size = len;
        bip->bip_vcnt = nr_vecs;
        return 0;
}

static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
                                    int nr_vecs, ssize_t bytes, ssize_t offset)
{
        unsigned int nr_bvecs = 0;
        int i, j;

        for (i = 0; i < nr_vecs; i = j) {
                size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
                struct folio *folio = page_folio(pages[i]);

                bytes -= size;
                for (j = i + 1; j < nr_vecs; j++) {
                        size_t next = min_t(size_t, PAGE_SIZE, bytes);

                        if (page_folio(pages[j]) != folio ||
                            pages[j] != pages[j - 1] + 1)
                                break;
                        unpin_user_page(pages[j]);
                        size += next;
                        bytes -= next;
                }

                bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
                offset = 0;
                nr_bvecs++;
        }

        return nr_bvecs;
}

int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes)
{
        struct request_queue *q = bdev_get_queue(bio->bi_bdev);
        unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
        struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
        struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
        unsigned int direction, nr_bvecs;
        struct iov_iter iter;
        int ret, nr_vecs;
        size_t offset;
        bool copy;

        if (bio_integrity(bio))
                return -EINVAL;
        if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
                return -E2BIG;

        if (bio_data_dir(bio) == READ)
                direction = ITER_DEST;
        else
                direction = ITER_SOURCE;

        iov_iter_ubuf(&iter, direction, ubuf, bytes);
        nr_vecs = iov_iter_npages(&iter, BIO_MAX_VECS + 1);
        if (nr_vecs > BIO_MAX_VECS)
                return -E2BIG;
        if (nr_vecs > UIO_FASTIOV) {
                bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL);
                if (!bvec)
                        return -ENOMEM;
                pages = NULL;
        }

        copy = !iov_iter_is_aligned(&iter, align, align);
        ret = iov_iter_extract_pages(&iter, &pages, bytes, nr_vecs, 0, &offset);
        if (unlikely(ret < 0))
                goto free_bvec;

        nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset);
        if (pages != stack_pages)
                kvfree(pages);
        if (nr_bvecs > queue_max_integrity_segments(q))
                copy = true;

        if (copy)
                ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
                                              direction);
        else
                ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes);
        if (ret)
                goto release_pages;
        if (bvec != stack_vec)
                kfree(bvec);

        return 0;

release_pages:
        bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
free_bvec:
        if (bvec != stack_vec)
                kfree(bvec);
        return ret;
}

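/*
 * Illustrative sketch of a hypothetical passthrough path, not part of
 * this file (meta_ubuf, meta_len and the labels are placeholders): a
 * driver handed a separate user buffer of protection information would
 * attach it before submission and unmap it on completion:
 *
 *      ret = bio_integrity_map_user(bio, meta_ubuf, meta_len);
 *      if (ret)
 *              goto out_unmap_data;
 *      ...
 *      bio_integrity_unmap_user(bio);  // in the completion handler
 */
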
/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio: bio to prepare
 *
 * Description: Checks if the bio already has an integrity payload attached.
 * If it does, the payload has been generated by another kernel subsystem,
 * and we just pass it through. Otherwise allocates integrity payload.
 * The bio must have data direction, target device and start sector set prior
 * to calling. In the WRITE case, integrity metadata will be generated using
 * the block device's integrity function. In the READ case, the buffer
 * will be prepared for DMA and a suitable end_io handler set up.
 */
bool bio_integrity_prep(struct bio *bio)
{
        struct bio_integrity_payload *bip;
        struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
        unsigned int len;
        void *buf;
        gfp_t gfp = GFP_NOIO;

        if (!bi)
                return true;

        if (!bio_sectors(bio))
                return true;

        /* Already protected? */
        if (bio_integrity(bio))
                return true;

        switch (bio_op(bio)) {
        case REQ_OP_READ:
                if (bi->flags & BLK_INTEGRITY_NOVERIFY)
                        return true;
                break;
        case REQ_OP_WRITE:
                if (bi->flags & BLK_INTEGRITY_NOGENERATE)
                        return true;

                /*
                 * Zero the memory allocated to not leak uninitialized kernel
                 * memory to disk for non-integrity metadata where nothing else
                 * initializes the memory.
                 */
                if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
                        gfp |= __GFP_ZERO;
                break;
        default:
                return true;
        }

        /* Allocate kernel buffer for protection data */
        len = bio_integrity_bytes(bi, bio_sectors(bio));
        buf = kmalloc(len, gfp);
        if (unlikely(buf == NULL))
                goto err_end_io;

        bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
        if (IS_ERR(bip)) {
                kfree(buf);
                goto err_end_io;
        }

        bip->bip_flags |= BIP_BLOCK_INTEGRITY;
        bip_set_seed(bip, bio->bi_iter.bi_sector);

        if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
                bip->bip_flags |= BIP_IP_CHECKSUM;

        if (bio_integrity_add_page(bio, virt_to_page(buf), len,
                                   offset_in_page(buf)) < len) {
                printk(KERN_ERR "could not attach integrity payload\n");
                goto err_end_io;
        }

        /* Auto-generate integrity metadata if this is a write */
        if (bio_data_dir(bio) == WRITE)
                blk_integrity_generate(bio);
        else
                bip->bio_iter = bio->bi_iter;
        return true;

err_end_io:
        bio->bi_status = BLK_STS_RESOURCE;
        bio_endio(bio);
        return false;
}
EXPORT_SYMBOL(bio_integrity_prep);

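/*
 * Illustrative sketch, not part of this file: the block layer invokes
 * this helper during request submission; the expected calling pattern
 * is roughly:
 *
 *      if (!bio_integrity_prep(bio))
 *              return;         // bio has already been failed and completed
 *      // otherwise continue submitting the bio to the driver
 */
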
/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work: Work struct stored in bio to be verified
 *
 * Description: This workqueue function is called to complete a READ
 * request. The function verifies the transferred integrity metadata
 * and then calls the original bio end_io function.
 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
        struct bio_integrity_payload *bip =
                container_of(work, struct bio_integrity_payload, bip_work);
        struct bio *bio = bip->bip_bio;

        blk_integrity_verify(bio);

        kfree(bvec_virt(bip->bip_vec));
        bio_integrity_free(bio);
        bio_endio(bio);
}

/**
 * __bio_integrity_endio - Integrity I/O completion function
 * @bio: Protected bio
 *
 * Description: Completion for integrity I/O
 *
 * Normally I/O completion is done in interrupt context. However,
 * verifying I/O integrity is a time-consuming task which must be run
 * in process context. This function postpones completion
 * accordingly.
 */
bool __bio_integrity_endio(struct bio *bio)
{
        struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
        struct bio_integrity_payload *bip = bio_integrity(bio);

        if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {
                INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
                queue_work(kintegrityd_wq, &bip->bip_work);
                return false;
        }

        kfree(bvec_virt(bip->bip_vec));
        bio_integrity_free(bio);
        return true;
}

/**
 * bio_integrity_advance - Advance integrity vector
 * @bio: bio whose integrity vector to update
 * @bytes_done: number of data bytes that have been completed
 *
 * Description: This function calculates how many integrity bytes the
 * number of completed data bytes corresponds to and advances the
 * integrity vector accordingly.
 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
        unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

        bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
        bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}

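/*
 * Worked example, for illustration only (assuming a 512-byte
 * protection interval and an 8-byte tuple, as with T10 PI):
 * completing 4096 data bytes covers 4096 >> 9 = 8 intervals, so
 * bip_iter.bi_sector advances by 8 and the integrity iterator by
 * 8 * 8 = 64 bytes.
 */
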
/**
 * bio_integrity_trim - Trim integrity vector
 * @bio: bio whose integrity vector to update
 *
 * Description: Used to trim the integrity vector in a cloned bio.
 */
void bio_integrity_trim(struct bio *bio)
{
        struct bio_integrity_payload *bip = bio_integrity(bio);
        struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

        bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}
EXPORT_SYMBOL(bio_integrity_trim);

/**
 * bio_integrity_clone - Callback for cloning bios with integrity metadata
 * @bio: New bio
 * @bio_src: Original bio
 * @gfp_mask: Memory allocation mask
 *
 * Description: Called to allocate a bip when cloning a bio
 */
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
                        gfp_t gfp_mask)
{
        struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
        struct bio_integrity_payload *bip;

        BUG_ON(bip_src == NULL);

        bip = bio_integrity_alloc(bio, gfp_mask, 0);
        if (IS_ERR(bip))
                return PTR_ERR(bip);

        bip->bip_vec = bip_src->bip_vec;
        bip->bip_iter = bip_src->bip_iter;
        bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;

        return 0;
}

int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
        if (mempool_initialized(&bs->bio_integrity_pool))
                return 0;

        if (mempool_init_slab_pool(&bs->bio_integrity_pool,
                                   pool_size, bip_slab))
                return -1;

        if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {
                mempool_exit(&bs->bio_integrity_pool);
                return -1;
        }

        return 0;
}
EXPORT_SYMBOL(bioset_integrity_create);

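/*
 * Illustrative sketch of a hypothetical driver init path, not part of
 * this file (md->bs and the labels are placeholders): a driver that
 * clones integrity-enabled bios from its own bio_set would back the
 * set with integrity mempools up front:
 *
 *      if (bioset_init(&md->bs, pool_size, 0, BIOSET_NEED_BVECS))
 *              goto err;
 *      if (bioset_integrity_create(&md->bs, pool_size))
 *              goto err_free_bioset;
 */
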
void bioset_integrity_free(struct bio_set *bs)
{
        mempool_exit(&bs->bio_integrity_pool);
        mempool_exit(&bs->bvec_integrity_pool);
}

void __init bio_integrity_init(void)
{
        /*
         * kintegrityd won't block much but may burn a lot of CPU cycles.
         * Make it highpri CPU intensive wq with max concurrency of 1.
         */
        kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
                                         WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
        if (!kintegrityd_wq)
                panic("Failed to create kintegrityd\n");

        bip_slab = kmem_cache_create("bio_integrity_payload",
                                     sizeof(struct bio_integrity_payload) +
                                     sizeof(struct bio_vec) * BIO_INLINE_VECS,
                                     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}