/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include <trace/events/block.h>

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

static mempool_t *bio_split_pool __read_mostly;

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		bio_slab_max <<= 1;
		bio_slabs = krealloc(bio_slabs,
				     bio_slab_max * sizeof(struct bio_slab),
				     GFP_KERNEL);
		if (!bio_slabs)
			goto out_unlock;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
{
	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);

	if (idx == BIOVEC_MAX_IDX)
		mempool_free(bv, bs->bvec_pool);
	else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
			      struct bio_set *bs)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BIOVEC_MAX_IDX) {
fallback:
		bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_WAIT
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
			*idx = BIOVEC_MAX_IDX;
			goto fallback;
		}
	}

	return bvl;
}

void bio_free(struct bio *bio, struct bio_set *bs)
{
	void *p;

	if (bio_has_allocated_vec(bio))
		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));

	if (bio_integrity(bio))
		bio_integrity_free(bio, bs);

	/*
	 * If we have front padding, adjust the bio pointer before freeing
	 */
	p = bio;
	if (bs->front_pad)
		p -= bs->front_pad;

	mempool_free(p, bs->bio_pool);
}
EXPORT_SYMBOL(bio_free);

void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	bio->bi_flags = 1 << BIO_UPTODATE;
	bio->bi_comp_cpu = -1;
	atomic_set(&bio->bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask: the GFP_ mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 * @bs: the bio_set to allocate from.
 *
 * Description:
 *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
 *   If %__GFP_WAIT is set then we will block on the internal pool waiting
 *   for a &struct bio to become free.
 *
 *   Note that the caller must set ->bi_destructor on successful return
 *   of a bio, to do the appropriate freeing of the bio once the reference
 *   count drops to zero.
 **/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	p = mempool_alloc(bs->bio_pool, gfp_mask);
	if (unlikely(!p))
		return NULL;
	bio = p + bs->front_pad;

	bio_init(bio);

	if (unlikely(!nr_iovecs))
		goto out_set;

	if (nr_iovecs <= BIO_INLINE_VECS) {
		bvl = bio->bi_inline_vecs;
		nr_iovecs = BIO_INLINE_VECS;
	} else {
		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
		if (unlikely(!bvl))
			goto err_free;

		nr_iovecs = bvec_nr_vecs(idx);
	}
out_set:
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

static void bio_fs_destructor(struct bio *bio)
{
	bio_free(bio, fs_bio_set);
}

/**
 * bio_alloc - allocate a new bio, memory pool backed
 * @gfp_mask: allocation mask to use
 * @nr_iovecs: number of iovecs
 *
 * bio_alloc will allocate a bio and associated bio_vec array that can hold
 * at least @nr_iovecs entries. Allocations will be done from the
 * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
 *
 * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
 * a bio. This is due to the mempool guarantees. To make this work, callers
 * must never allocate more than 1 bio at a time from this pool. Callers
 * that need to allocate more than 1 bio must always submit the previously
 * allocated bio for IO before attempting to allocate a new one. Failure to
 * do so can cause livelocks under memory pressure.
 *
 * RETURNS:
 * Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
{
	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);

	if (bio)
		bio->bi_destructor = bio_fs_destructor;

	return bio;
}
EXPORT_SYMBOL(bio_alloc);
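
/*
 * Illustrative sketch (not part of the original file): the "one bio at a
 * time" rule documented above.  A caller that must write many pages fills
 * and submits one bio before allocating the next, so the mempool guarantee
 * behind bio_alloc() can make forward progress under memory pressure.
 * Everything prefixed example_ is hypothetical.
 */
static void example_end_write(struct bio *bio, int error)
{
	/* the pages belong to the caller; just drop the bio */
	bio_put(bio);
}

static void example_write_pages(struct block_device *bdev, sector_t sector,
				struct page **pages, int nr_pages)
{
	int i = 0;

	while (i < nr_pages) {
		int nr_vecs = min(nr_pages - i, bio_get_nr_vecs(bdev));
		/* GFP_NOIO implies __GFP_WAIT, so this cannot return NULL */
		struct bio *bio = bio_alloc(GFP_NOIO, nr_vecs);

		bio->bi_bdev = bdev;
		bio->bi_sector = sector;
		bio->bi_end_io = example_end_write;

		while (i < nr_pages &&
		       bio_add_page(bio, pages[i], PAGE_SIZE, 0) == PAGE_SIZE)
			i++;

		sector += bio->bi_size >> 9;
		/* the previous bio is in flight before the next bio_alloc() */
		submit_bio(WRITE, bio);
	}
}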

static void bio_kmalloc_destructor(struct bio *bio)
{
	if (bio_integrity(bio))
		bio_integrity_free(bio, fs_bio_set);
	kfree(bio);
}

/**
 * bio_kmalloc - allocate a bio for I/O using kmalloc()
 * @gfp_mask: the GFP_ mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 *
 * Description:
 *   Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask contains
 *   %__GFP_WAIT, the allocation is guaranteed to succeed.
 *
 **/
struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
{
	struct bio *bio;

	if (nr_iovecs > UIO_MAXIOV)
		return NULL;

	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
		      gfp_mask);
	if (unlikely(!bio))
		return NULL;

	bio_init(bio);
	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bio->bi_inline_vecs;
	bio->bi_destructor = bio_kmalloc_destructor;

	return bio;
}
EXPORT_SYMBOL(bio_kmalloc);

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i) {
		char *data = bvec_kmap_irq(bv, &flags);
		memset(data, 0, bv->bv_len);
		flush_dcache_page(bv->bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio: bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

	/*
	 * last put frees it
	 */
	if (atomic_dec_and_test(&bio->bi_cnt)) {
		bio->bi_next = NULL;
		bio->bi_destructor(bio);
	}
}
EXPORT_SYMBOL(bio_put);
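
/*
 * Illustrative sketch (not part of the original file): holding a private
 * reference across submission.  submit_bio() may complete and free the bio
 * before it returns, so a caller that wants to inspect the bio afterwards
 * takes its own reference with bio_get() and drops it with bio_put().  The
 * waiting mechanism is elided; names prefixed example_ are hypothetical.
 */
static int example_submit_and_check(int rw, struct bio *bio)
{
	int uptodate;

	bio_get(bio);
	submit_bio(rw, bio);
	/* ... wait here for ->bi_end_io to have run ... */
	uptodate = bio_flagged(bio, BIO_UPTODATE);
	bio_put(bio);

	return uptodate ? 0 : -EIO;
}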

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

/**
 * __bio_clone - clone a bio
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 */
void __bio_clone(struct bio *bio, struct bio *bio_src)
{
	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
	       bio_src->bi_max_vecs * sizeof(struct bio_vec));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_sector = bio_src->bi_sector;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_flags |= 1 << BIO_CLONED;
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_vcnt = bio_src->bi_vcnt;
	bio->bi_size = bio_src->bi_size;
	bio->bi_idx = bio_src->bi_idx;
}
EXPORT_SYMBOL(__bio_clone);

/**
 * bio_clone - clone a bio
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 *
 * Like __bio_clone, only also allocates the returned bio
 */
struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);

	if (!b)
		return NULL;

	b->bi_destructor = bio_fs_destructor;
	__bio_clone(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone);
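
/*
 * Illustrative sketch (not part of the original file): the remapping-driver
 * pattern built on bio_clone().  The clone shares the original's page
 * vector, so only the target device and sector are changed before
 * resubmission, and the original bio is completed from the clone's end_io.
 * Names prefixed example_ are hypothetical.
 */
static void example_remap_end_io(struct bio *clone, int error)
{
	struct bio *orig = clone->bi_private;

	bio_put(clone);
	bio_endio(orig, error);
}

static void example_remap(struct bio *orig, struct block_device *target,
			  sector_t sector_offset)
{
	struct bio *clone = bio_clone(orig, GFP_NOIO);

	clone->bi_bdev = target;
	clone->bi_sector = orig->bi_sector + sector_offset;
	clone->bi_end_io = example_remap_end_io;
	clone->bi_private = orig;

	generic_make_request(clone);
}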

/**
 * bio_get_nr_vecs - return approx number of vecs
 * @bdev: I/O target
 *
 * Return the approximate number of pages we can send to this target.
 * There's no guarantee that you will be able to fit this number of pages
 * into a bio, it does not account for dynamic restrictions that vary
 * on offset.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int nr_pages;

	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (nr_pages > queue_max_segments(q))
		nr_pages = queue_max_segments(q);

	return nr_pages;
}
EXPORT_SYMBOL(bio_get_nr_vecs);

static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
			  *page, unsigned int len, unsigned int offset,
			  unsigned short max_sectors)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_size + len) >> 9) > max_sectors)
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			unsigned int prev_bv_len = prev->bv_len;
			prev->bv_len += len;

			if (q->merge_bvec_fn) {
				struct bvec_merge_data bvm = {
					/* prev_bvec is already charged in
					   bi_size, discharge it in order to
					   simulate merging updated prev_bvec
					   as new bvec. */
					.bi_bdev = bio->bi_bdev,
					.bi_sector = bio->bi_sector,
					.bi_size = bio->bi_size - prev_bv_len,
					.bi_rw = bio->bi_rw,
				};

				if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
					prev->bv_len -= len;
					return 0;
				}
			}

			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * we might lose a segment or two here, but better that than
	 * making this too complex.
	 */

	while (bio->bi_phys_segments >= queue_max_segments(q)) {

		if (retried_segments)
			return 0;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;

	/*
	 * if queue has other restrictions (eg varying max sector size
	 * depending on offset), it can specify a merge_bvec_fn in the
	 * queue to get further control
	 */
	if (q->merge_bvec_fn) {
		struct bvec_merge_data bvm = {
			.bi_bdev = bio->bi_bdev,
			.bi_sector = bio->bi_sector,
			.bi_size = bio->bi_size,
			.bi_rw = bio->bi_rw,
		};

		/*
		 * merge_bvec_fn() returns number of bytes it can accept
		 * at this offset
		 */
		if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
			bvec->bv_page = NULL;
			bvec->bv_len = 0;
			bvec->bv_offset = 0;
			return 0;
		}
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	bio->bi_vcnt++;
	bio->bi_phys_segments++;
 done:
	bio->bi_size += len;
	return len;
}

/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
		    unsigned int len, unsigned int offset)
{
	return __bio_add_page(q, bio, page, len, offset,
			      queue_max_hw_sectors(q));
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
		 unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
}
EXPORT_SYMBOL(bio_add_page);
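
/*
 * Illustrative sketch (not part of the original file): adding sub-page
 * chunks.  When the block size is smaller than the page size, consecutive
 * calls for the same page at adjacent offsets hit the merge fast path in
 * __bio_add_page() above and consume a single bio_vec.  Assumes blksize
 * divides PAGE_SIZE; names prefixed example_ are hypothetical.
 */
static unsigned int example_add_blocks(struct bio *bio, struct page *page,
				       unsigned int blksize,
				       unsigned int nr_blocks)
{
	unsigned int i;

	for (i = 0; i < nr_blocks; i++) {
		/* bio_add_page() returns the bytes added, 0 if it didn't fit */
		if (bio_add_page(bio, page, blksize, i * blksize) < blksize)
			break;
	}

	/* the caller submits what fitted and retries the rest in a new bio */
	return i;
}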

struct bio_map_data {
	struct bio_vec *iovecs;
	struct sg_iovec *sgvecs;
	int nr_sgvecs;
	int is_our_pages;
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
			     struct sg_iovec *iov, int iov_count,
			     int is_our_pages)
{
	memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
	bmd->nr_sgvecs = iov_count;
	bmd->is_our_pages = is_our_pages;
	bio->bi_private = bmd;
}

static void bio_free_map_data(struct bio_map_data *bmd)
{
	kfree(bmd->iovecs);
	kfree(bmd->sgvecs);
	kfree(bmd);
}

static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (iov_count > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(sizeof(*bmd), gfp_mask);
	if (!bmd)
		return NULL;

	bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
	if (!bmd->iovecs) {
		kfree(bmd);
		return NULL;
	}

	bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
	if (bmd->sgvecs)
		return bmd;

	kfree(bmd->iovecs);
	kfree(bmd);
	return NULL;
}

static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
			  struct sg_iovec *iov, int iov_count,
			  int to_user, int from_user, int do_free_page)
{
	int ret = 0, i;
	struct bio_vec *bvec;
	int iov_idx = 0;
	unsigned int iov_off = 0;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *bv_addr = page_address(bvec->bv_page);
		unsigned int bv_len = iovecs[i].bv_len;

		while (bv_len && iov_idx < iov_count) {
			unsigned int bytes;
			char __user *iov_addr;

			bytes = min_t(unsigned int,
				      iov[iov_idx].iov_len - iov_off, bv_len);
			iov_addr = iov[iov_idx].iov_base + iov_off;

			if (!ret) {
				if (to_user)
					ret = copy_to_user(iov_addr, bv_addr,
							   bytes);

				if (from_user)
					ret = copy_from_user(bv_addr, iov_addr,
							     bytes);

				if (ret)
					ret = -EFAULT;
			}

			bv_len -= bytes;
			bv_addr += bytes;
			iov_addr += bytes;
			iov_off += bytes;

			if (iov[iov_idx].iov_len == iov_off) {
				iov_idx++;
				iov_off = 0;
			}
		}

		if (do_free_page)
			__free_page(bvec->bv_page);
	}

	return ret;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED))
		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
				     0, bmd->is_our_pages);
	bio_free_map_data(bmd);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(bio_uncopy_user);

/**
 * bio_copy_user_iov - copy user data to bio
 * @q: destination block queue
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iov: the iovec.
 * @iov_count: number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      struct sg_iovec *iov, int iov_count,
			      int write_to_vm, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct bio_vec *bvec;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = 0;
	unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long)iov[i].iov_base;
		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		len += iov[i].iov_len;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	if (!write_to_vm)
		bio->bi_rw |= REQ_WRITE;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
		if (ret)
			goto cleanup;
	}

	bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
	return bio;
cleanup:
	if (!map_data)
		bio_for_each_segment(bvec, bio, i)
			__free_page(bvec->bv_page);

	bio_put(bio);
out_bmd:
	bio_free_map_data(bmd);
	return ERR_PTR(ret);
}

/**
 * bio_copy_user - copy user data to bio
 * @q: destination block queue
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @uaddr: start of user address
 * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
			  unsigned long uaddr, unsigned int len,
			  int write_to_vm, gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_copy_user);

static struct bio *__bio_map_user_iov(struct request_queue *q,
				      struct block_device *bdev,
				      struct sg_iovec *iov, int iov_count,
				      int write_to_vm, gfp_t gfp_mask)
{
	int i, j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least hardsector size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
					  write_to_vm, &pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = uaddr & ~PAGE_MASK;
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
					    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			page_cache_release(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (!write_to_vm)
		bio->bi_rw |= REQ_WRITE;

	bio->bi_bdev = bdev;
	bio->bi_flags |= (1 << BIO_USER_MAPPED);
	return bio;

 out_unmap:
	for (i = 0; i < nr_pages; i++) {
		if (!pages[i])
			break;
		page_cache_release(pages[i]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

/**
 * bio_map_user - map user address into bio
 * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @uaddr: start of user address
 * @len: length in bytes
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
			 unsigned long uaddr, unsigned int len, int write_to_vm,
			 gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_map_user);
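
/*
 * Illustrative sketch (not part of the original file): a synchronous,
 * zero-copy read into a user buffer with bio_map_user()/bio_unmap_user().
 * The buffer must satisfy the queue's dma alignment; the extra reference
 * taken by bio_map_user_iov() is dropped again by bio_unmap_user().
 * Assumes <linux/completion.h> comes in via the headers above; names
 * prefixed example_ are hypothetical.
 */
static void example_map_end_io(struct bio *bio, int error)
{
	complete(bio->bi_private);
}

static int example_read_to_user(struct block_device *bdev, sector_t sector,
				unsigned long uaddr, unsigned int len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio;
	int ret;

	bio = bio_map_user(bdev_get_queue(bdev), bdev, uaddr, len, 1,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_sector = sector;
	bio->bi_private = &done;
	bio->bi_end_io = example_map_end_io;

	submit_bio(READ, bio);
	wait_for_completion(&done);

	ret = bio_flagged(bio, BIO_UPTODATE) ? 0 : -EIO;
	bio_unmap_user(bio);	/* dirties the pages and drops both references */
	return ret;
}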

/**
 * bio_map_user_iov - map user sg_iovec table into bio
 * @q: the struct request_queue for the bio
 * @bdev: destination block device
 * @iov: the iovec.
 * @iov_count: number of elements in the iovec
 * @write_to_vm: bool indicating writing to pages or not
 * @gfp_mask: memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
			     struct sg_iovec *iov, int iov_count,
			     int write_to_vm, gfp_t gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
				 gfp_mask);
	if (IS_ERR(bio))
		return bio;

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);

	return bio;
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		page_cache_release(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user(). Must be called with
 * a process context.
 *
 * bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}
EXPORT_SYMBOL(bio_unmap_user);

static void bio_map_kern_endio(struct bio *bio, int err)
{
	bio_put(bio);
}

static struct bio *__bio_map_kern(struct request_queue *q, void *data,
				  unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes)
			break;

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_kern(q, data, len, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (bio->bi_size == len)
		return bio;

	/*
	 * Don't support partial mappings.
	 */
	bio_put(bio);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL(bio_map_kern);
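
/*
 * Illustrative sketch (not part of the original file): synchronous write of
 * a physically contiguous kernel buffer (kmalloc'ed, not vmalloc'ed, since
 * __bio_map_kern() uses virt_to_page()).  The buffer must stay valid until
 * completion, so the default end_io is replaced by one that signals a
 * completion before dropping the bio.  In-tree users normally reach this
 * path through blk_rq_map_kern(); names prefixed example_ are hypothetical.
 */
static void example_kern_end_io(struct bio *bio, int error)
{
	complete(bio->bi_private);
	bio_put(bio);		/* what bio_map_kern_endio() would have done */
}

static int example_write_kernel_buf(struct block_device *bdev, sector_t sector,
				    void *buf, unsigned int len)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio;

	bio = bio_map_kern(bdev_get_queue(bdev), buf, len, GFP_NOIO);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_private = &done;
	bio->bi_end_io = example_kern_end_io;	/* overrides bio_map_kern_endio */

	submit_bio(WRITE, bio);
	wait_for_completion(&done);	/* buf must stay valid until here */
	return 0;
}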

static void bio_copy_kern_endio(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	const int read = bio_data_dir(bio) == READ;
	struct bio_map_data *bmd = bio->bi_private;
	int i;
	char *p = bmd->sgvecs[0].iov_base;

	__bio_for_each_segment(bvec, bio, i, 0) {
		char *addr = page_address(bvec->bv_page);
		int len = bmd->iovecs[i].bv_len;

		if (read)
			memcpy(p, addr, len);

		__free_page(bvec->bv_page);
		p += len;
	}

	bio_free_map_data(bmd);
	bio_put(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	struct bio *bio;
	struct bio_vec *bvec;
	int i;

	bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (!reading) {
		void *p = data;

		bio_for_each_segment(bvec, bio, i) {
			char *addr = page_address(bvec->bv_page);

			memcpy(addr, p, bvec->bv_len);
			p += bvec->bv_len;
		}
	}

	bio->bi_end_io = bio_copy_kern_endio;

	return bio;
}
EXPORT_SYMBOL(bio_copy_kern);

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.   If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, pdflush) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	int nr_clean_pages = 0;
	int i;

	for (i = 0; i < bio->bi_vcnt; i++) {
		struct page *page = bvec[i].bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			page_cache_release(page);
			bvec[i].bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}
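
/*
 * Illustrative sketch (not part of the original file): the direct-IO read
 * protocol described above.  The pages are dirtied before submission, and
 * the interrupt-time completion handler re-checks them, deferring any
 * redirtying to process context via bio_check_pages_dirty().  Assumes the
 * caller has already set bi_rw on the bio; names prefixed example_ are
 * hypothetical.
 */
static void example_dio_end_io(struct bio *bio, int error)
{
	if (bio_data_dir(bio) == READ) {
		bio_check_pages_dirty(bio);	/* releases the pages and the bio */
	} else {
		bio_release_pages(bio);
		bio_put(bio);
	}
}

static void example_dio_submit(struct bio *bio)
{
	bio->bi_end_io = example_dio_end_io;

	if (bio_data_dir(bio) == READ)
		bio_set_pages_dirty(bio);

	submit_bio(bio->bi_rw, bio);
}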

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment(bvec, bi, i)
		flush_dcache_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

/**
 * bio_endio - end I/O on a bio
 * @bio: bio
 * @error: error, if any
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the
 *   preferred way to end I/O on a bio, it takes care of clearing
 *   BIO_UPTODATE on error. @error is 0 on success, and one of the
 *   established -Exxxx (-EIO, for instance) error values in case
 *   something went wrong. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io
 *   function.
 **/
void bio_endio(struct bio *bio, int error)
{
	if (error)
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (bio->bi_end_io)
		bio->bi_end_io(bio, error);
}
EXPORT_SYMBOL(bio_endio);
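
/*
 * Illustrative sketch (not part of the original file): how a driver ends
 * I/O.  A trivial bio-based driver that discards writes and returns zeroes
 * for reads completes every bio through bio_endio(), passing 0 for success
 * or a -Exxx value on failure, never calling ->bi_end_io directly.  The
 * make_request_fn signature matches this kernel generation; names prefixed
 * example_ are hypothetical.
 */
static int example_null_make_request(struct request_queue *q, struct bio *bio)
{
	if (bio_data_dir(bio) == READ)
		zero_fill_bio(bio);	/* pretend the "read" returned zeroes */

	bio_endio(bio, 0);
	return 0;
}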

void bio_pair_release(struct bio_pair *bp)
{
	if (atomic_dec_and_test(&bp->cnt)) {
		struct bio *master = bp->bio1.bi_private;

		bio_endio(master, bp->error);
		mempool_free(bp, bp->bio2.bi_private);
	}
}
EXPORT_SYMBOL(bio_pair_release);

static void bio_pair_end_1(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);

	if (err)
		bp->error = err;

	bio_pair_release(bp);
}

static void bio_pair_end_2(struct bio *bi, int err)
{
	struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);

	if (err)
		bp->error = err;

	bio_pair_release(bp);
}

/*
 * split a bio - only worry about a bio with a single page in its iovec
 */
struct bio_pair *bio_split(struct bio *bi, int first_sectors)
{
	struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);

	if (!bp)
		return bp;

	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
			  bi->bi_sector + first_sectors);

	BUG_ON(bi->bi_vcnt != 1);
	BUG_ON(bi->bi_idx != 0);
	atomic_set(&bp->cnt, 3);
	bp->error = 0;
	bp->bio1 = *bi;
	bp->bio2 = *bi;
	bp->bio2.bi_sector += first_sectors;
	bp->bio2.bi_size -= first_sectors << 9;
	bp->bio1.bi_size = first_sectors << 9;

	bp->bv1 = bi->bi_io_vec[0];
	bp->bv2 = bi->bi_io_vec[0];
	bp->bv2.bv_offset += first_sectors << 9;
	bp->bv2.bv_len -= first_sectors << 9;
	bp->bv1.bv_len = first_sectors << 9;

	bp->bio1.bi_io_vec = &bp->bv1;
	bp->bio2.bi_io_vec = &bp->bv2;

	bp->bio1.bi_max_vecs = 1;
	bp->bio2.bi_max_vecs = 1;

	bp->bio1.bi_end_io = bio_pair_end_1;
	bp->bio2.bi_end_io = bio_pair_end_2;

	bp->bio1.bi_private = bi;
	bp->bio2.bi_private = bio_split_pool;

	if (bio_integrity(bi))
		bio_integrity_split(bi, bp, first_sectors);

	return bp;
}
EXPORT_SYMBOL(bio_split);
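
/*
 * Illustrative sketch (not part of the original file): how a striping
 * driver uses bio_split() when a single-bio_vec bio straddles a chunk
 * boundary.  Both halves are resubmitted and the pair reference is dropped;
 * the original bio only completes once bio1, bio2 and this reference are
 * all released (the pair count is set to 3 above).  Names prefixed
 * example_ are hypothetical.
 */
static void example_split_and_submit(struct bio *bio, int sectors_to_boundary)
{
	/* bio_split() insists on a bio with exactly one bio_vec */
	struct bio_pair *bp = bio_split(bio, sectors_to_boundary);

	generic_make_request(&bp->bio1);
	generic_make_request(&bp->bio2);
	bio_pair_release(bp);
}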

/**
 * bio_sector_offset - Find hardware sector offset in bio
 * @bio: bio to inspect
 * @index: bio_vec index
 * @offset: offset in bv_page
 *
 * Return the number of hardware sectors between beginning of bio
 * and an end point indicated by a bio_vec index and an offset
 * within that vector's page.
 */
sector_t bio_sector_offset(struct bio *bio, unsigned short index,
			   unsigned int offset)
{
	unsigned int sector_sz;
	struct bio_vec *bv;
	sector_t sectors;
	int i;

	sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
	sectors = 0;

	if (index >= bio->bi_idx)
		index = bio->bi_vcnt - 1;

	__bio_for_each_segment(bv, bio, i, 0) {
		if (i == index) {
			if (offset > bv->bv_offset)
				sectors += (offset - bv->bv_offset) / sector_sz;
			break;
		}

		sectors += bv->bv_len / sector_sz;
	}

	return sectors;
}
EXPORT_SYMBOL(bio_sector_offset);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
static int biovec_create_pools(struct bio_set *bs, int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;

	bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
	if (!bs->bvec_pool)
		return -ENOMEM;

	return 0;
}

static void biovec_free_pools(struct bio_set *bs)
{
	mempool_destroy(bs->bvec_pool);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	bioset_integrity_free(bs);
	biovec_free_pools(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

/**
 * bioset_create - Create a bio_set
 * @pool_size: Number of bio and bio_vecs to cache in the mempool
 * @front_pad: Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (!biovec_create_pools(bs, pool_size))
		return bs;

bad:
	bioset_free(bs);
	return NULL;
}
EXPORT_SYMBOL(bioset_create);
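
/*
 * Illustrative sketch (not part of the original file): using @front_pad to
 * embed a per-bio context in front of the bio itself, the way stacking
 * drivers do.  The bio is the final member of the wrapper, matching the
 * note above, and ->bi_destructor returns the whole allocation to the
 * private bio_set.  Names prefixed example_ are hypothetical.
 */
struct example_io {
	void		*ctx;		/* driver-private bookkeeping */
	struct bio	bio;		/* must come last */
};

static struct bio_set *example_bs;

static int example_init(void)
{
	example_bs = bioset_create(BIO_POOL_SIZE,
				   offsetof(struct example_io, bio));
	return example_bs ? 0 : -ENOMEM;
}

static void example_bio_destructor(struct bio *bio)
{
	bio_free(bio, example_bs);	/* bio_free() undoes the front padding */
}

static struct example_io *example_alloc_io(void)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, example_bs);

	bio->bi_destructor = example_bio_destructor;
	return container_of(bio, struct example_io, bio);
}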

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
						     sizeof(struct bio_pair));
	if (!bio_split_pool)
		panic("bio: can't create split pool\n");

	return 0;
}
subsys_initcall(init_bio);
1/*
2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public Licens
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
16 *
17 */
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/bio.h>
21#include <linux/blkdev.h>
22#include <linux/iocontext.h>
23#include <linux/slab.h>
24#include <linux/init.h>
25#include <linux/kernel.h>
26#include <linux/export.h>
27#include <linux/mempool.h>
28#include <linux/workqueue.h>
29#include <linux/cgroup.h>
30#include <scsi/sg.h> /* for struct sg_iovec */
31
32#include <trace/events/block.h>
33
34/*
35 * Test patch to inline a certain number of bi_io_vec's inside the bio
36 * itself, to shrink a bio data allocation from two mempool calls to one
37 */
38#define BIO_INLINE_VECS 4
39
40static mempool_t *bio_split_pool __read_mostly;
41
42/*
43 * if you change this list, also change bvec_alloc or things will
44 * break badly! cannot be bigger than what you can fit into an
45 * unsigned short
46 */
47#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
48static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
49 BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
50};
51#undef BV
52
53/*
54 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
55 * IO code that does not need private memory pools.
56 */
57struct bio_set *fs_bio_set;
58
59/*
60 * Our slab pool management
61 */
62struct bio_slab {
63 struct kmem_cache *slab;
64 unsigned int slab_ref;
65 unsigned int slab_size;
66 char name[8];
67};
68static DEFINE_MUTEX(bio_slab_lock);
69static struct bio_slab *bio_slabs;
70static unsigned int bio_slab_nr, bio_slab_max;
71
72static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
73{
74 unsigned int sz = sizeof(struct bio) + extra_size;
75 struct kmem_cache *slab = NULL;
76 struct bio_slab *bslab;
77 unsigned int i, entry = -1;
78
79 mutex_lock(&bio_slab_lock);
80
81 i = 0;
82 while (i < bio_slab_nr) {
83 bslab = &bio_slabs[i];
84
85 if (!bslab->slab && entry == -1)
86 entry = i;
87 else if (bslab->slab_size == sz) {
88 slab = bslab->slab;
89 bslab->slab_ref++;
90 break;
91 }
92 i++;
93 }
94
95 if (slab)
96 goto out_unlock;
97
98 if (bio_slab_nr == bio_slab_max && entry == -1) {
99 bio_slab_max <<= 1;
100 bio_slabs = krealloc(bio_slabs,
101 bio_slab_max * sizeof(struct bio_slab),
102 GFP_KERNEL);
103 if (!bio_slabs)
104 goto out_unlock;
105 }
106 if (entry == -1)
107 entry = bio_slab_nr++;
108
109 bslab = &bio_slabs[entry];
110
111 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
112 slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
113 if (!slab)
114 goto out_unlock;
115
116 printk(KERN_INFO "bio: create slab <%s> at %d\n", bslab->name, entry);
117 bslab->slab = slab;
118 bslab->slab_ref = 1;
119 bslab->slab_size = sz;
120out_unlock:
121 mutex_unlock(&bio_slab_lock);
122 return slab;
123}
124
125static void bio_put_slab(struct bio_set *bs)
126{
127 struct bio_slab *bslab = NULL;
128 unsigned int i;
129
130 mutex_lock(&bio_slab_lock);
131
132 for (i = 0; i < bio_slab_nr; i++) {
133 if (bs->bio_slab == bio_slabs[i].slab) {
134 bslab = &bio_slabs[i];
135 break;
136 }
137 }
138
139 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
140 goto out;
141
142 WARN_ON(!bslab->slab_ref);
143
144 if (--bslab->slab_ref)
145 goto out;
146
147 kmem_cache_destroy(bslab->slab);
148 bslab->slab = NULL;
149
150out:
151 mutex_unlock(&bio_slab_lock);
152}
153
154unsigned int bvec_nr_vecs(unsigned short idx)
155{
156 return bvec_slabs[idx].nr_vecs;
157}
158
159void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
160{
161 BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
162
163 if (idx == BIOVEC_MAX_IDX)
164 mempool_free(bv, bs->bvec_pool);
165 else {
166 struct biovec_slab *bvs = bvec_slabs + idx;
167
168 kmem_cache_free(bvs->slab, bv);
169 }
170}
171
172struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
173 struct bio_set *bs)
174{
175 struct bio_vec *bvl;
176
177 /*
178 * see comment near bvec_array define!
179 */
180 switch (nr) {
181 case 1:
182 *idx = 0;
183 break;
184 case 2 ... 4:
185 *idx = 1;
186 break;
187 case 5 ... 16:
188 *idx = 2;
189 break;
190 case 17 ... 64:
191 *idx = 3;
192 break;
193 case 65 ... 128:
194 *idx = 4;
195 break;
196 case 129 ... BIO_MAX_PAGES:
197 *idx = 5;
198 break;
199 default:
200 return NULL;
201 }
202
203 /*
204 * idx now points to the pool we want to allocate from. only the
205 * 1-vec entry pool is mempool backed.
206 */
207 if (*idx == BIOVEC_MAX_IDX) {
208fallback:
209 bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
210 } else {
211 struct biovec_slab *bvs = bvec_slabs + *idx;
212 gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
213
214 /*
215 * Make this allocation restricted and don't dump info on
216 * allocation failures, since we'll fallback to the mempool
217 * in case of failure.
218 */
219 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
220
221 /*
222 * Try a slab allocation. If this fails and __GFP_WAIT
223 * is set, retry with the 1-entry mempool
224 */
225 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
226 if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
227 *idx = BIOVEC_MAX_IDX;
228 goto fallback;
229 }
230 }
231
232 return bvl;
233}
234
235void bio_free(struct bio *bio, struct bio_set *bs)
236{
237 void *p;
238
239 if (bio_has_allocated_vec(bio))
240 bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
241
242 if (bio_integrity(bio))
243 bio_integrity_free(bio, bs);
244
245 /*
246 * If we have front padding, adjust the bio pointer before freeing
247 */
248 p = bio;
249 if (bs->front_pad)
250 p -= bs->front_pad;
251
252 mempool_free(p, bs->bio_pool);
253}
254EXPORT_SYMBOL(bio_free);
255
256void bio_init(struct bio *bio)
257{
258 memset(bio, 0, sizeof(*bio));
259 bio->bi_flags = 1 << BIO_UPTODATE;
260 atomic_set(&bio->bi_cnt, 1);
261}
262EXPORT_SYMBOL(bio_init);
263
264/**
265 * bio_alloc_bioset - allocate a bio for I/O
266 * @gfp_mask: the GFP_ mask given to the slab allocator
267 * @nr_iovecs: number of iovecs to pre-allocate
268 * @bs: the bio_set to allocate from.
269 *
270 * Description:
271 * bio_alloc_bioset will try its own mempool to satisfy the allocation.
272 * If %__GFP_WAIT is set then we will block on the internal pool waiting
273 * for a &struct bio to become free.
274 *
275 * Note that the caller must set ->bi_destructor on successful return
276 * of a bio, to do the appropriate freeing of the bio once the reference
277 * count drops to zero.
278 **/
279struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
280{
281 unsigned long idx = BIO_POOL_NONE;
282 struct bio_vec *bvl = NULL;
283 struct bio *bio;
284 void *p;
285
286 p = mempool_alloc(bs->bio_pool, gfp_mask);
287 if (unlikely(!p))
288 return NULL;
289 bio = p + bs->front_pad;
290
291 bio_init(bio);
292
293 if (unlikely(!nr_iovecs))
294 goto out_set;
295
296 if (nr_iovecs <= BIO_INLINE_VECS) {
297 bvl = bio->bi_inline_vecs;
298 nr_iovecs = BIO_INLINE_VECS;
299 } else {
300 bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
301 if (unlikely(!bvl))
302 goto err_free;
303
304 nr_iovecs = bvec_nr_vecs(idx);
305 }
306out_set:
307 bio->bi_flags |= idx << BIO_POOL_OFFSET;
308 bio->bi_max_vecs = nr_iovecs;
309 bio->bi_io_vec = bvl;
310 return bio;
311
312err_free:
313 mempool_free(p, bs->bio_pool);
314 return NULL;
315}
316EXPORT_SYMBOL(bio_alloc_bioset);
317
318static void bio_fs_destructor(struct bio *bio)
319{
320 bio_free(bio, fs_bio_set);
321}
322
323/**
324 * bio_alloc - allocate a new bio, memory pool backed
325 * @gfp_mask: allocation mask to use
326 * @nr_iovecs: number of iovecs
327 *
328 * bio_alloc will allocate a bio and associated bio_vec array that can hold
329 * at least @nr_iovecs entries. Allocations will be done from the
330 * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
331 *
332 * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
333 * a bio. This is due to the mempool guarantees. To make this work, callers
334 * must never allocate more than 1 bio at a time from this pool. Callers
335 * that need to allocate more than 1 bio must always submit the previously
336 * allocated bio for IO before attempting to allocate a new one. Failure to
337 * do so can cause livelocks under memory pressure.
338 *
339 * RETURNS:
340 * Pointer to new bio on success, NULL on failure.
341 */
342struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
343{
344 struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
345
346 if (bio)
347 bio->bi_destructor = bio_fs_destructor;
348
349 return bio;
350}
351EXPORT_SYMBOL(bio_alloc);
352
353static void bio_kmalloc_destructor(struct bio *bio)
354{
355 if (bio_integrity(bio))
356 bio_integrity_free(bio, fs_bio_set);
357 kfree(bio);
358}
359
360/**
361 * bio_kmalloc - allocate a bio for I/O using kmalloc()
362 * @gfp_mask: the GFP_ mask given to the slab allocator
363 * @nr_iovecs: number of iovecs to pre-allocate
364 *
365 * Description:
366 * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
367 * %__GFP_WAIT, the allocation is guaranteed to succeed.
368 *
369 **/
370struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
371{
372 struct bio *bio;
373
374 if (nr_iovecs > UIO_MAXIOV)
375 return NULL;
376
377 bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
378 gfp_mask);
379 if (unlikely(!bio))
380 return NULL;
381
382 bio_init(bio);
383 bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
384 bio->bi_max_vecs = nr_iovecs;
385 bio->bi_io_vec = bio->bi_inline_vecs;
386 bio->bi_destructor = bio_kmalloc_destructor;
387
388 return bio;
389}
390EXPORT_SYMBOL(bio_kmalloc);
391
392void zero_fill_bio(struct bio *bio)
393{
394 unsigned long flags;
395 struct bio_vec *bv;
396 int i;
397
398 bio_for_each_segment(bv, bio, i) {
399 char *data = bvec_kmap_irq(bv, &flags);
400 memset(data, 0, bv->bv_len);
401 flush_dcache_page(bv->bv_page);
402 bvec_kunmap_irq(data, &flags);
403 }
404}
405EXPORT_SYMBOL(zero_fill_bio);
406
407/**
408 * bio_put - release a reference to a bio
409 * @bio: bio to release reference to
410 *
411 * Description:
412 * Put a reference to a &struct bio, either one you have gotten with
413 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
414 **/
415void bio_put(struct bio *bio)
416{
417 BIO_BUG_ON(!atomic_read(&bio->bi_cnt));
418
419 /*
420 * last put frees it
421 */
422 if (atomic_dec_and_test(&bio->bi_cnt)) {
423 bio_disassociate_task(bio);
424 bio->bi_next = NULL;
425 bio->bi_destructor(bio);
426 }
427}
428EXPORT_SYMBOL(bio_put);
429
430inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
431{
432 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
433 blk_recount_segments(q, bio);
434
435 return bio->bi_phys_segments;
436}
437EXPORT_SYMBOL(bio_phys_segments);
438
439/**
440 * __bio_clone - clone a bio
441 * @bio: destination bio
442 * @bio_src: bio to clone
443 *
444 * Clone a &bio. Caller will own the returned bio, but not
445 * the actual data it points to. Reference count of returned
446 * bio will be one.
447 */
448void __bio_clone(struct bio *bio, struct bio *bio_src)
449{
450 memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
451 bio_src->bi_max_vecs * sizeof(struct bio_vec));
452
453 /*
454 * most users will be overriding ->bi_bdev with a new target,
455 * so we don't set nor calculate new physical/hw segment counts here
456 */
457 bio->bi_sector = bio_src->bi_sector;
458 bio->bi_bdev = bio_src->bi_bdev;
459 bio->bi_flags |= 1 << BIO_CLONED;
460 bio->bi_rw = bio_src->bi_rw;
461 bio->bi_vcnt = bio_src->bi_vcnt;
462 bio->bi_size = bio_src->bi_size;
463 bio->bi_idx = bio_src->bi_idx;
464}
465EXPORT_SYMBOL(__bio_clone);
466
467/**
468 * bio_clone - clone a bio
469 * @bio: bio to clone
470 * @gfp_mask: allocation priority
471 *
472 * Like __bio_clone, only also allocates the returned bio
473 */
474struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
475{
476 struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
477
478 if (!b)
479 return NULL;
480
481 b->bi_destructor = bio_fs_destructor;
482 __bio_clone(b, bio);
483
484 if (bio_integrity(bio)) {
485 int ret;
486
487 ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
488
489 if (ret < 0) {
490 bio_put(b);
491 return NULL;
492 }
493 }
494
495 return b;
496}
497EXPORT_SYMBOL(bio_clone);
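
/*
 * Illustrative sketch (editor's addition): a hypothetical stacking driver
 * cloning an incoming bio so it can redirect the clone to another device
 * while keeping the original for final completion:
 *
 *	struct bio *clone = bio_clone(bio, GFP_NOIO);
 *
 *	if (!clone)
 *		return -ENOMEM;
 *	clone->bi_bdev = target_bdev;
 *	clone->bi_end_io = example_clone_endio;
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 */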
498
499/**
500 * bio_get_nr_vecs - return approx number of vecs
501 * @bdev: I/O target
502 *
503 * Return the approximate number of pages we can send to this target.
504 * There's no guarantee that you will be able to fit this number of pages
505 * into a bio; the estimate does not account for dynamic restrictions
506 * that vary with the starting offset.
507 */
508int bio_get_nr_vecs(struct block_device *bdev)
509{
510 struct request_queue *q = bdev_get_queue(bdev);
511 int nr_pages;
512
513 nr_pages = min_t(unsigned,
514 queue_max_segments(q),
515 queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
516
517 return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
518
519}
520EXPORT_SYMBOL(bio_get_nr_vecs);
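
/*
 * Illustrative sketch (editor's addition): callers use the estimate only to
 * size the allocation and still check bio_add_page(), since the value is
 * approximate (nr_pages_left is a hypothetical count):
 *
 *	int nr = min(nr_pages_left, bio_get_nr_vecs(bdev));
 *	struct bio *bio = bio_alloc(GFP_NOIO, nr);
 */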
521
522static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
523 *page, unsigned int len, unsigned int offset,
524 unsigned short max_sectors)
525{
526 int retried_segments = 0;
527 struct bio_vec *bvec;
528
529 /*
530 * cloned bio must not modify vec list
531 */
532 if (unlikely(bio_flagged(bio, BIO_CLONED)))
533 return 0;
534
535 if (((bio->bi_size + len) >> 9) > max_sectors)
536 return 0;
537
538 /*
539 * For filesystems with a blocksize smaller than the pagesize
540 * we will often be called with the same page as last time and
541 * a consecutive offset. Optimize this special case.
542 */
543 if (bio->bi_vcnt > 0) {
544 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
545
546 if (page == prev->bv_page &&
547 offset == prev->bv_offset + prev->bv_len) {
548 unsigned int prev_bv_len = prev->bv_len;
549 prev->bv_len += len;
550
551 if (q->merge_bvec_fn) {
552 struct bvec_merge_data bvm = {
553 /* prev_bvec is already charged in
554 bi_size, discharge it in order to
555 simulate merging updated prev_bvec
556 as new bvec. */
557 .bi_bdev = bio->bi_bdev,
558 .bi_sector = bio->bi_sector,
559 .bi_size = bio->bi_size - prev_bv_len,
560 .bi_rw = bio->bi_rw,
561 };
562
563 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
564 prev->bv_len -= len;
565 return 0;
566 }
567 }
568
569 goto done;
570 }
571 }
572
573 if (bio->bi_vcnt >= bio->bi_max_vecs)
574 return 0;
575
576 /*
577	 * we might lose a segment or two here, but better that than
578	 * making this too complex.
579 */
580
581 while (bio->bi_phys_segments >= queue_max_segments(q)) {
582
583 if (retried_segments)
584 return 0;
585
586 retried_segments = 1;
587 blk_recount_segments(q, bio);
588 }
589
590 /*
591 * setup the new entry, we might clear it again later if we
592 * cannot add the page
593 */
594 bvec = &bio->bi_io_vec[bio->bi_vcnt];
595 bvec->bv_page = page;
596 bvec->bv_len = len;
597 bvec->bv_offset = offset;
598
599 /*
600 * if queue has other restrictions (eg varying max sector size
601 * depending on offset), it can specify a merge_bvec_fn in the
602 * queue to get further control
603 */
604 if (q->merge_bvec_fn) {
605 struct bvec_merge_data bvm = {
606 .bi_bdev = bio->bi_bdev,
607 .bi_sector = bio->bi_sector,
608 .bi_size = bio->bi_size,
609 .bi_rw = bio->bi_rw,
610 };
611
612 /*
613 * merge_bvec_fn() returns number of bytes it can accept
614 * at this offset
615 */
616 if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
617 bvec->bv_page = NULL;
618 bvec->bv_len = 0;
619 bvec->bv_offset = 0;
620 return 0;
621 }
622 }
623
624 /* If we may be able to merge these biovecs, force a recount */
625 if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
626 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
627
628 bio->bi_vcnt++;
629 bio->bi_phys_segments++;
630 done:
631 bio->bi_size += len;
632 return len;
633}
634
635/**
636 * bio_add_pc_page - attempt to add page to bio
637 * @q: the target queue
638 * @bio: destination bio
639 * @page: page to add
640 * @len: vec entry length
641 * @offset: vec entry offset
642 *
643 * Attempt to add a page to the bio_vec maplist. This can fail for a
644 * number of reasons, such as the bio being full or target block device
645 * limitations. The target block device must allow bios up to PAGE_SIZE,
646 * so it is always possible to add a single page to an empty bio.
647 *
648 * This should only be used by REQ_PC bios.
649 */
650int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
651 unsigned int len, unsigned int offset)
652{
653 return __bio_add_page(q, bio, page, len, offset,
654 queue_max_hw_sectors(q));
655}
656EXPORT_SYMBOL(bio_add_pc_page);
657
658/**
659 * bio_add_page - attempt to add page to bio
660 * @bio: destination bio
661 * @page: page to add
662 * @len: vec entry length
663 * @offset: vec entry offset
664 *
665 * Attempt to add a page to the bio_vec maplist. This can fail for a
666 * number of reasons, such as the bio being full or target block device
667 * limitations. The target block device must allow bios up to PAGE_SIZE,
668 * so it is always possible to add a single page to an empty bio.
669 */
670int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
671 unsigned int offset)
672{
673 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
674 return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
675}
676EXPORT_SYMBOL(bio_add_page);
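
/*
 * Illustrative sketch (editor's addition): the common filesystem pattern is
 * to add pages until bio_add_page() refuses one, submit the full bio and
 * start a new one for the page that did not fit (example_submit(),
 * example_new_bio() and pages[] are assumptions for illustration):
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0)) {
 *			example_submit(bio);
 *			bio = example_new_bio(bdev, sector, nr_pages - i);
 *			bio_add_page(bio, pages[i], PAGE_SIZE, 0);
 *		}
 *	}
 */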
677
678struct bio_map_data {
679 struct bio_vec *iovecs;
680 struct sg_iovec *sgvecs;
681 int nr_sgvecs;
682 int is_our_pages;
683};
684
685static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
686 struct sg_iovec *iov, int iov_count,
687 int is_our_pages)
688{
689 memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
690 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
691 bmd->nr_sgvecs = iov_count;
692 bmd->is_our_pages = is_our_pages;
693 bio->bi_private = bmd;
694}
695
696static void bio_free_map_data(struct bio_map_data *bmd)
697{
698 kfree(bmd->iovecs);
699 kfree(bmd->sgvecs);
700 kfree(bmd);
701}
702
703static struct bio_map_data *bio_alloc_map_data(int nr_segs,
704 unsigned int iov_count,
705 gfp_t gfp_mask)
706{
707 struct bio_map_data *bmd;
708
709 if (iov_count > UIO_MAXIOV)
710 return NULL;
711
712 bmd = kmalloc(sizeof(*bmd), gfp_mask);
713 if (!bmd)
714 return NULL;
715
716 bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
717 if (!bmd->iovecs) {
718 kfree(bmd);
719 return NULL;
720 }
721
722 bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
723 if (bmd->sgvecs)
724 return bmd;
725
726 kfree(bmd->iovecs);
727 kfree(bmd);
728 return NULL;
729}
730
731static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
732 struct sg_iovec *iov, int iov_count,
733 int to_user, int from_user, int do_free_page)
734{
735 int ret = 0, i;
736 struct bio_vec *bvec;
737 int iov_idx = 0;
738 unsigned int iov_off = 0;
739
740 __bio_for_each_segment(bvec, bio, i, 0) {
741 char *bv_addr = page_address(bvec->bv_page);
742 unsigned int bv_len = iovecs[i].bv_len;
743
744 while (bv_len && iov_idx < iov_count) {
745 unsigned int bytes;
746 char __user *iov_addr;
747
748 bytes = min_t(unsigned int,
749 iov[iov_idx].iov_len - iov_off, bv_len);
750 iov_addr = iov[iov_idx].iov_base + iov_off;
751
752 if (!ret) {
753 if (to_user)
754 ret = copy_to_user(iov_addr, bv_addr,
755 bytes);
756
757 if (from_user)
758 ret = copy_from_user(bv_addr, iov_addr,
759 bytes);
760
761 if (ret)
762 ret = -EFAULT;
763 }
764
765 bv_len -= bytes;
766 bv_addr += bytes;
767 iov_addr += bytes;
768 iov_off += bytes;
769
770 if (iov[iov_idx].iov_len == iov_off) {
771 iov_idx++;
772 iov_off = 0;
773 }
774 }
775
776 if (do_free_page)
777 __free_page(bvec->bv_page);
778 }
779
780 return ret;
781}
782
783/**
784 * bio_uncopy_user - finish previously mapped bio
785 * @bio: bio being terminated
786 *
787 * Free pages allocated from bio_copy_user() and write back data
788 * to user space in case of a read.
789 */
790int bio_uncopy_user(struct bio *bio)
791{
792 struct bio_map_data *bmd = bio->bi_private;
793 int ret = 0;
794
795 if (!bio_flagged(bio, BIO_NULL_MAPPED))
796 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
797 bmd->nr_sgvecs, bio_data_dir(bio) == READ,
798 0, bmd->is_our_pages);
799 bio_free_map_data(bmd);
800 bio_put(bio);
801 return ret;
802}
803EXPORT_SYMBOL(bio_uncopy_user);
804
805/**
806 * bio_copy_user_iov - copy user data to bio
807 * @q: destination block queue
808 * @map_data: pointer to the rq_map_data holding pages (if necessary)
809 * @iov: the iovec.
810 * @iov_count: number of elements in the iovec
811 * @write_to_vm: bool indicating writing to pages or not
812 * @gfp_mask: memory allocation flags
813 *
814 * Prepares and returns a bio for indirect user io, bouncing data
815 * to/from kernel pages as necessary. Must be paired with a
816 * call to bio_uncopy_user() on io completion.
817 */
818struct bio *bio_copy_user_iov(struct request_queue *q,
819 struct rq_map_data *map_data,
820 struct sg_iovec *iov, int iov_count,
821 int write_to_vm, gfp_t gfp_mask)
822{
823 struct bio_map_data *bmd;
824 struct bio_vec *bvec;
825 struct page *page;
826 struct bio *bio;
827 int i, ret;
828 int nr_pages = 0;
829 unsigned int len = 0;
830 unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;
831
832 for (i = 0; i < iov_count; i++) {
833 unsigned long uaddr;
834 unsigned long end;
835 unsigned long start;
836
837 uaddr = (unsigned long)iov[i].iov_base;
838 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
839 start = uaddr >> PAGE_SHIFT;
840
841 /*
842 * Overflow, abort
843 */
844 if (end < start)
845 return ERR_PTR(-EINVAL);
846
847 nr_pages += end - start;
848 len += iov[i].iov_len;
849 }
850
851 if (offset)
852 nr_pages++;
853
854 bmd = bio_alloc_map_data(nr_pages, iov_count, gfp_mask);
855 if (!bmd)
856 return ERR_PTR(-ENOMEM);
857
858 ret = -ENOMEM;
859 bio = bio_kmalloc(gfp_mask, nr_pages);
860 if (!bio)
861 goto out_bmd;
862
863 if (!write_to_vm)
864 bio->bi_rw |= REQ_WRITE;
865
866 ret = 0;
867
868 if (map_data) {
869 nr_pages = 1 << map_data->page_order;
870 i = map_data->offset / PAGE_SIZE;
871 }
872 while (len) {
873 unsigned int bytes = PAGE_SIZE;
874
875 bytes -= offset;
876
877 if (bytes > len)
878 bytes = len;
879
880 if (map_data) {
881 if (i == map_data->nr_entries * nr_pages) {
882 ret = -ENOMEM;
883 break;
884 }
885
886 page = map_data->pages[i / nr_pages];
887 page += (i % nr_pages);
888
889 i++;
890 } else {
891 page = alloc_page(q->bounce_gfp | gfp_mask);
892 if (!page) {
893 ret = -ENOMEM;
894 break;
895 }
896 }
897
898 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
899 break;
900
901 len -= bytes;
902 offset = 0;
903 }
904
905 if (ret)
906 goto cleanup;
907
908 /*
909 * success
910 */
911 if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
912 (map_data && map_data->from_user)) {
913 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0);
914 if (ret)
915 goto cleanup;
916 }
917
918 bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
919 return bio;
920cleanup:
921 if (!map_data)
922 bio_for_each_segment(bvec, bio, i)
923 __free_page(bvec->bv_page);
924
925 bio_put(bio);
926out_bmd:
927 bio_free_map_data(bmd);
928 return ERR_PTR(ret);
929}
930
931/**
932 * bio_copy_user - copy user data to bio
933 * @q: destination block queue
934 * @map_data: pointer to the rq_map_data holding pages (if necessary)
935 * @uaddr: start of user address
936 * @len: length in bytes
937 * @write_to_vm: bool indicating writing to pages or not
938 * @gfp_mask: memory allocation flags
939 *
940 * Prepares and returns a bio for indirect user io, bouncing data
941 * to/from kernel pages as necessary. Must be paired with a
942 * call to bio_uncopy_user() on io completion.
943 */
944struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
945 unsigned long uaddr, unsigned int len,
946 int write_to_vm, gfp_t gfp_mask)
947{
948 struct sg_iovec iov;
949
950 iov.iov_base = (void __user *)uaddr;
951 iov.iov_len = len;
952
953 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
954}
955EXPORT_SYMBOL(bio_copy_user);
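
/*
 * Illustrative sketch (editor's addition): bio_copy_user() and
 * bio_uncopy_user() are always used as a pair; a hypothetical caller
 * bouncing a user buffer for a read might do:
 *
 *	bio = bio_copy_user(q, NULL, uaddr, len, 1, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... submit the bio and wait for completion ...
 *	ret = bio_uncopy_user(bio);
 */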
956
957static struct bio *__bio_map_user_iov(struct request_queue *q,
958 struct block_device *bdev,
959 struct sg_iovec *iov, int iov_count,
960 int write_to_vm, gfp_t gfp_mask)
961{
962 int i, j;
963 int nr_pages = 0;
964 struct page **pages;
965 struct bio *bio;
966 int cur_page = 0;
967 int ret, offset;
968
969 for (i = 0; i < iov_count; i++) {
970 unsigned long uaddr = (unsigned long)iov[i].iov_base;
971 unsigned long len = iov[i].iov_len;
972 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
973 unsigned long start = uaddr >> PAGE_SHIFT;
974
975 /*
976 * Overflow, abort
977 */
978 if (end < start)
979 return ERR_PTR(-EINVAL);
980
981 nr_pages += end - start;
982 /*
983 * buffer must be aligned to at least hardsector size for now
984 */
985 if (uaddr & queue_dma_alignment(q))
986 return ERR_PTR(-EINVAL);
987 }
988
989 if (!nr_pages)
990 return ERR_PTR(-EINVAL);
991
992 bio = bio_kmalloc(gfp_mask, nr_pages);
993 if (!bio)
994 return ERR_PTR(-ENOMEM);
995
996 ret = -ENOMEM;
997 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
998 if (!pages)
999 goto out;
1000
1001 for (i = 0; i < iov_count; i++) {
1002 unsigned long uaddr = (unsigned long)iov[i].iov_base;
1003 unsigned long len = iov[i].iov_len;
1004 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1005 unsigned long start = uaddr >> PAGE_SHIFT;
1006 const int local_nr_pages = end - start;
1007 const int page_limit = cur_page + local_nr_pages;
1008
1009 ret = get_user_pages_fast(uaddr, local_nr_pages,
1010 write_to_vm, &pages[cur_page]);
1011 if (ret < local_nr_pages) {
1012 ret = -EFAULT;
1013 goto out_unmap;
1014 }
1015
1016 offset = uaddr & ~PAGE_MASK;
1017 for (j = cur_page; j < page_limit; j++) {
1018 unsigned int bytes = PAGE_SIZE - offset;
1019
1020 if (len <= 0)
1021 break;
1022
1023 if (bytes > len)
1024 bytes = len;
1025
1026 /*
1027			 * if the page cannot be added in full, give up
1028 */
1029 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
1030 bytes)
1031 break;
1032
1033 len -= bytes;
1034 offset = 0;
1035 }
1036
1037 cur_page = j;
1038 /*
1039 * release the pages we didn't map into the bio, if any
1040 */
1041 while (j < page_limit)
1042 page_cache_release(pages[j++]);
1043 }
1044
1045 kfree(pages);
1046
1047 /*
1048 * set data direction, and check if mapped pages need bouncing
1049 */
1050 if (!write_to_vm)
1051 bio->bi_rw |= REQ_WRITE;
1052
1053 bio->bi_bdev = bdev;
1054 bio->bi_flags |= (1 << BIO_USER_MAPPED);
1055 return bio;
1056
1057 out_unmap:
1058 for (i = 0; i < nr_pages; i++) {
1059		if (!pages[i])
1060 break;
1061 page_cache_release(pages[i]);
1062 }
1063 out:
1064 kfree(pages);
1065 bio_put(bio);
1066 return ERR_PTR(ret);
1067}
1068
1069/**
1070 * bio_map_user - map user address into bio
1071 * @q: the struct request_queue for the bio
1072 * @bdev: destination block device
1073 * @uaddr: start of user address
1074 * @len: length in bytes
1075 * @write_to_vm: bool indicating writing to pages or not
1076 * @gfp_mask: memory allocation flags
1077 *
1078 * Map the user space address into a bio suitable for io to a block
1079 * device. Returns an error pointer in case of error.
1080 */
1081struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
1082 unsigned long uaddr, unsigned int len, int write_to_vm,
1083 gfp_t gfp_mask)
1084{
1085 struct sg_iovec iov;
1086
1087 iov.iov_base = (void __user *)uaddr;
1088 iov.iov_len = len;
1089
1090 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
1091}
1092EXPORT_SYMBOL(bio_map_user);
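
/*
 * Illustrative sketch (editor's addition): bio_map_user() pins the user
 * pages for zero-copy I/O and must be undone with bio_unmap_user() once
 * the I/O has completed:
 *
 *	bio = bio_map_user(q, bdev, uaddr, len, 1, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	... submit the bio and wait for completion ...
 *	bio_unmap_user(bio);
 */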
1093
1094/**
1095 * bio_map_user_iov - map user sg_iovec table into bio
1096 * @q: the struct request_queue for the bio
1097 * @bdev: destination block device
1098 * @iov: the iovec.
1099 * @iov_count: number of elements in the iovec
1100 * @write_to_vm: bool indicating writing to pages or not
1101 * @gfp_mask: memory allocation flags
1102 *
1103 * Map the user space address into a bio suitable for io to a block
1104 * device. Returns an error pointer in case of error.
1105 */
1106struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
1107 struct sg_iovec *iov, int iov_count,
1108 int write_to_vm, gfp_t gfp_mask)
1109{
1110 struct bio *bio;
1111
1112 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
1113 gfp_mask);
1114 if (IS_ERR(bio))
1115 return bio;
1116
1117 /*
1118 * subtle -- if __bio_map_user() ended up bouncing a bio,
1119 * it would normally disappear when its bi_end_io is run.
1120 * however, we need it for the unmap, so grab an extra
1121 * reference to it
1122 */
1123 bio_get(bio);
1124
1125 return bio;
1126}
1127
1128static void __bio_unmap_user(struct bio *bio)
1129{
1130 struct bio_vec *bvec;
1131 int i;
1132
1133 /*
1134 * make sure we dirty pages we wrote to
1135 */
1136 __bio_for_each_segment(bvec, bio, i, 0) {
1137 if (bio_data_dir(bio) == READ)
1138 set_page_dirty_lock(bvec->bv_page);
1139
1140 page_cache_release(bvec->bv_page);
1141 }
1142
1143 bio_put(bio);
1144}
1145
1146/**
1147 * bio_unmap_user - unmap a bio
1148 * @bio: the bio being unmapped
1149 *
1150 * Unmap a bio previously mapped by bio_map_user(). Must be called from
1151 * process context.
1152 *
1153 * bio_unmap_user() may sleep.
1154 */
1155void bio_unmap_user(struct bio *bio)
1156{
1157 __bio_unmap_user(bio);
1158 bio_put(bio);
1159}
1160EXPORT_SYMBOL(bio_unmap_user);
1161
1162static void bio_map_kern_endio(struct bio *bio, int err)
1163{
1164 bio_put(bio);
1165}
1166
1167static struct bio *__bio_map_kern(struct request_queue *q, void *data,
1168 unsigned int len, gfp_t gfp_mask)
1169{
1170 unsigned long kaddr = (unsigned long)data;
1171 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1172 unsigned long start = kaddr >> PAGE_SHIFT;
1173 const int nr_pages = end - start;
1174 int offset, i;
1175 struct bio *bio;
1176
1177 bio = bio_kmalloc(gfp_mask, nr_pages);
1178 if (!bio)
1179 return ERR_PTR(-ENOMEM);
1180
1181 offset = offset_in_page(kaddr);
1182 for (i = 0; i < nr_pages; i++) {
1183 unsigned int bytes = PAGE_SIZE - offset;
1184
1185 if (len <= 0)
1186 break;
1187
1188 if (bytes > len)
1189 bytes = len;
1190
1191 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1192 offset) < bytes)
1193 break;
1194
1195 data += bytes;
1196 len -= bytes;
1197 offset = 0;
1198 }
1199
1200 bio->bi_end_io = bio_map_kern_endio;
1201 return bio;
1202}
1203
1204/**
1205 * bio_map_kern - map kernel address into bio
1206 * @q: the struct request_queue for the bio
1207 * @data: pointer to buffer to map
1208 * @len: length in bytes
1209 * @gfp_mask: allocation flags for bio allocation
1210 *
1211 * Map the kernel address into a bio suitable for io to a block
1212 * device. Returns an error pointer in case of error.
1213 */
1214struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1215 gfp_t gfp_mask)
1216{
1217 struct bio *bio;
1218
1219 bio = __bio_map_kern(q, data, len, gfp_mask);
1220 if (IS_ERR(bio))
1221 return bio;
1222
1223 if (bio->bi_size == len)
1224 return bio;
1225
1226 /*
1227 * Don't support partial mappings.
1228 */
1229 bio_put(bio);
1230 return ERR_PTR(-EINVAL);
1231}
1232EXPORT_SYMBOL(bio_map_kern);
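
/*
 * Illustrative sketch (editor's addition): mapping a directly-mapped kernel
 * buffer, e.g. for a driver-internal command. The buffer must be reachable
 * through virt_to_page(), so vmalloc()ed memory is not suitable here:
 *
 *	bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	submit_bio(READ, bio);
 */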
1233
1234static void bio_copy_kern_endio(struct bio *bio, int err)
1235{
1236 struct bio_vec *bvec;
1237 const int read = bio_data_dir(bio) == READ;
1238 struct bio_map_data *bmd = bio->bi_private;
1239 int i;
1240 char *p = bmd->sgvecs[0].iov_base;
1241
1242 __bio_for_each_segment(bvec, bio, i, 0) {
1243 char *addr = page_address(bvec->bv_page);
1244 int len = bmd->iovecs[i].bv_len;
1245
1246 if (read)
1247 memcpy(p, addr, len);
1248
1249 __free_page(bvec->bv_page);
1250 p += len;
1251 }
1252
1253 bio_free_map_data(bmd);
1254 bio_put(bio);
1255}
1256
1257/**
1258 * bio_copy_kern - copy kernel address into bio
1259 * @q: the struct request_queue for the bio
1260 * @data: pointer to buffer to copy
1261 * @len: length in bytes
1262 * @gfp_mask: allocation flags for bio and page allocation
1263 * @reading: data direction is READ
1264 *
1265 * Copy the kernel address into a bio suitable for io to a block
1266 * device. Returns an error pointer in case of error.
1267 */
1268struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1269 gfp_t gfp_mask, int reading)
1270{
1271 struct bio *bio;
1272 struct bio_vec *bvec;
1273 int i;
1274
1275 bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
1276 if (IS_ERR(bio))
1277 return bio;
1278
1279 if (!reading) {
1280 void *p = data;
1281
1282 bio_for_each_segment(bvec, bio, i) {
1283 char *addr = page_address(bvec->bv_page);
1284
1285 memcpy(addr, p, bvec->bv_len);
1286 p += bvec->bv_len;
1287 }
1288 }
1289
1290 bio->bi_end_io = bio_copy_kern_endio;
1291
1292 return bio;
1293}
1294EXPORT_SYMBOL(bio_copy_kern);
1295
1296/*
1297 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1298 * for performing direct-IO in BIOs.
1299 *
1300 * The problem is that we cannot run set_page_dirty() from interrupt context
1301 * because the required locks are not interrupt-safe. So what we can do is to
1302 * mark the pages dirty _before_ performing IO. And in interrupt context,
1303 * check that the pages are still dirty. If so, fine. If not, redirty them
1304 * in process context.
1305 *
1306 * We special-case compound pages here: normally this means reads into hugetlb
1307 * pages. The logic in here doesn't really work right for compound pages
1308 * because the VM does not uniformly chase down the head page in all cases.
1309 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1310 * handle them at all. So we skip compound pages here at an early stage.
1311 *
1312 * Note that this code is very hard to test under normal circumstances because
1313 * direct-io pins the pages with get_user_pages(). This makes
1314 * is_page_cache_freeable return false, and the VM will not clean the pages.
1315 * But other code (e.g. pdflush) could clean the pages if they are mapped
1316 * pagecache.
1317 *
1318 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1319 * deferred bio dirtying paths.
1320 */
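
/*
 * Illustrative sketch (editor's addition): the direct-IO read path pairs
 * the two helpers below roughly like this (hypothetical end_io name):
 *
 *	bio_set_pages_dirty(bio);	before submission, in process context
 *	submit_bio(READ, bio);
 *
 *	static void example_dio_end_io(struct bio *bio, int error)
 *	{
 *		bio_check_pages_dirty(bio);	re-dirties via workqueue if needed
 *	}
 */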
1321
1322/*
1323 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1324 */
1325void bio_set_pages_dirty(struct bio *bio)
1326{
1327 struct bio_vec *bvec = bio->bi_io_vec;
1328 int i;
1329
1330 for (i = 0; i < bio->bi_vcnt; i++) {
1331 struct page *page = bvec[i].bv_page;
1332
1333 if (page && !PageCompound(page))
1334 set_page_dirty_lock(page);
1335 }
1336}
1337
1338static void bio_release_pages(struct bio *bio)
1339{
1340 struct bio_vec *bvec = bio->bi_io_vec;
1341 int i;
1342
1343 for (i = 0; i < bio->bi_vcnt; i++) {
1344 struct page *page = bvec[i].bv_page;
1345
1346 if (page)
1347 put_page(page);
1348 }
1349}
1350
1351/*
1352 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1353 * If they are, then fine. If, however, some pages are clean then they must
1354 * have been written out during the direct-IO read. So we take another ref on
1355 * the BIO and the offending pages and re-dirty the pages in process context.
1356 *
1357 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1358 * here on. It will run one page_cache_release() against each page and will
1359 * run one bio_put() against the BIO.
1360 */
1361
1362static void bio_dirty_fn(struct work_struct *work);
1363
1364static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1365static DEFINE_SPINLOCK(bio_dirty_lock);
1366static struct bio *bio_dirty_list;
1367
1368/*
1369 * This runs in process context
1370 */
1371static void bio_dirty_fn(struct work_struct *work)
1372{
1373 unsigned long flags;
1374 struct bio *bio;
1375
1376 spin_lock_irqsave(&bio_dirty_lock, flags);
1377 bio = bio_dirty_list;
1378 bio_dirty_list = NULL;
1379 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1380
1381 while (bio) {
1382 struct bio *next = bio->bi_private;
1383
1384 bio_set_pages_dirty(bio);
1385 bio_release_pages(bio);
1386 bio_put(bio);
1387 bio = next;
1388 }
1389}
1390
1391void bio_check_pages_dirty(struct bio *bio)
1392{
1393 struct bio_vec *bvec = bio->bi_io_vec;
1394 int nr_clean_pages = 0;
1395 int i;
1396
1397 for (i = 0; i < bio->bi_vcnt; i++) {
1398 struct page *page = bvec[i].bv_page;
1399
1400 if (PageDirty(page) || PageCompound(page)) {
1401 page_cache_release(page);
1402 bvec[i].bv_page = NULL;
1403 } else {
1404 nr_clean_pages++;
1405 }
1406 }
1407
1408 if (nr_clean_pages) {
1409 unsigned long flags;
1410
1411 spin_lock_irqsave(&bio_dirty_lock, flags);
1412 bio->bi_private = bio_dirty_list;
1413 bio_dirty_list = bio;
1414 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1415 schedule_work(&bio_dirty_work);
1416 } else {
1417 bio_put(bio);
1418 }
1419}
1420
1421#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1422void bio_flush_dcache_pages(struct bio *bi)
1423{
1424 int i;
1425 struct bio_vec *bvec;
1426
1427 bio_for_each_segment(bvec, bi, i)
1428 flush_dcache_page(bvec->bv_page);
1429}
1430EXPORT_SYMBOL(bio_flush_dcache_pages);
1431#endif
1432
1433/**
1434 * bio_endio - end I/O on a bio
1435 * @bio: bio
1436 * @error: error, if any
1437 *
1438 * Description:
1439 * bio_endio() will end I/O on the whole bio. bio_endio() is the
1440 * preferred way to end I/O on a bio; it takes care of clearing
1441 * BIO_UPTODATE on error. @error is 0 on success, and one of the
1442 * established -Exxxx (-EIO, for instance) error values in case
1443 * something went wrong. No one should call bi_end_io() directly on a
1444 * bio unless they own it and thus know that it has an end_io
1445 * function.
1446 **/
1447void bio_endio(struct bio *bio, int error)
1448{
1449 if (error)
1450 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1451 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1452 error = -EIO;
1453
1454 if (bio->bi_end_io)
1455 bio->bi_end_io(bio, error);
1456}
1457EXPORT_SYMBOL(bio_endio);
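
/*
 * Illustrative sketch (editor's addition): a caller needing synchronous
 * behaviour typically completes a struct completion from its bi_end_io
 * (names below are hypothetical):
 *
 *	static void example_end_io(struct bio *bio, int error)
 *	{
 *		complete(bio->bi_private);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	bio->bi_private = &done;
 *	bio->bi_end_io = example_end_io;
 *	submit_bio(READ, bio);
 *	wait_for_completion(&done);
 */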
1458
1459void bio_pair_release(struct bio_pair *bp)
1460{
1461 if (atomic_dec_and_test(&bp->cnt)) {
1462 struct bio *master = bp->bio1.bi_private;
1463
1464 bio_endio(master, bp->error);
1465 mempool_free(bp, bp->bio2.bi_private);
1466 }
1467}
1468EXPORT_SYMBOL(bio_pair_release);
1469
1470static void bio_pair_end_1(struct bio *bi, int err)
1471{
1472 struct bio_pair *bp = container_of(bi, struct bio_pair, bio1);
1473
1474 if (err)
1475 bp->error = err;
1476
1477 bio_pair_release(bp);
1478}
1479
1480static void bio_pair_end_2(struct bio *bi, int err)
1481{
1482 struct bio_pair *bp = container_of(bi, struct bio_pair, bio2);
1483
1484 if (err)
1485 bp->error = err;
1486
1487 bio_pair_release(bp);
1488}
1489
1490/*
1491 * split a bio - only worry about a bio with a single page in its iovec
1492 */
1493struct bio_pair *bio_split(struct bio *bi, int first_sectors)
1494{
1495 struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO);
1496
1497 if (!bp)
1498 return bp;
1499
1500 trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
1501 bi->bi_sector + first_sectors);
1502
1503 BUG_ON(bi->bi_vcnt != 1);
1504 BUG_ON(bi->bi_idx != 0);
1505 atomic_set(&bp->cnt, 3);
1506 bp->error = 0;
1507 bp->bio1 = *bi;
1508 bp->bio2 = *bi;
1509 bp->bio2.bi_sector += first_sectors;
1510 bp->bio2.bi_size -= first_sectors << 9;
1511 bp->bio1.bi_size = first_sectors << 9;
1512
1513 bp->bv1 = bi->bi_io_vec[0];
1514 bp->bv2 = bi->bi_io_vec[0];
1515 bp->bv2.bv_offset += first_sectors << 9;
1516 bp->bv2.bv_len -= first_sectors << 9;
1517 bp->bv1.bv_len = first_sectors << 9;
1518
1519 bp->bio1.bi_io_vec = &bp->bv1;
1520 bp->bio2.bi_io_vec = &bp->bv2;
1521
1522 bp->bio1.bi_max_vecs = 1;
1523 bp->bio2.bi_max_vecs = 1;
1524
1525 bp->bio1.bi_end_io = bio_pair_end_1;
1526 bp->bio2.bi_end_io = bio_pair_end_2;
1527
1528 bp->bio1.bi_private = bi;
1529 bp->bio2.bi_private = bio_split_pool;
1530
1531 if (bio_integrity(bi))
1532 bio_integrity_split(bi, bp, first_sectors);
1533
1534 return bp;
1535}
1536EXPORT_SYMBOL(bio_split);
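
/*
 * Illustrative sketch (editor's addition): a raid0-style driver splitting a
 * single-page bio that straddles a chunk boundary submits both halves and
 * then drops its reference on the pair:
 *
 *	struct bio_pair *bp = bio_split(bio, sectors_to_boundary);
 *
 *	generic_make_request(&bp->bio1);
 *	generic_make_request(&bp->bio2);
 *	bio_pair_release(bp);
 */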
1537
1538/**
1539 * bio_sector_offset - Find hardware sector offset in bio
1540 * @bio: bio to inspect
1541 * @index: bio_vec index
1542 * @offset: offset in bv_page
1543 *
1544 * Return the number of hardware sectors between beginning of bio
1545 * and an end point indicated by a bio_vec index and an offset
1546 * within that vector's page.
1547 */
1548sector_t bio_sector_offset(struct bio *bio, unsigned short index,
1549 unsigned int offset)
1550{
1551 unsigned int sector_sz;
1552 struct bio_vec *bv;
1553 sector_t sectors;
1554 int i;
1555
1556 sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
1557 sectors = 0;
1558
1559 if (index >= bio->bi_idx)
1560 index = bio->bi_vcnt - 1;
1561
1562 __bio_for_each_segment(bv, bio, i, 0) {
1563 if (i == index) {
1564 if (offset > bv->bv_offset)
1565 sectors += (offset - bv->bv_offset) / sector_sz;
1566 break;
1567 }
1568
1569 sectors += bv->bv_len / sector_sz;
1570 }
1571
1572 return sectors;
1573}
1574EXPORT_SYMBOL(bio_sector_offset);
1575
1576/*
1577 * create memory pools for biovec's in a bio_set.
1578 * use the global biovec slabs created for general use.
1579 */
1580static int biovec_create_pools(struct bio_set *bs, int pool_entries)
1581{
1582 struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;
1583
1584 bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
1585 if (!bs->bvec_pool)
1586 return -ENOMEM;
1587
1588 return 0;
1589}
1590
1591static void biovec_free_pools(struct bio_set *bs)
1592{
1593 mempool_destroy(bs->bvec_pool);
1594}
1595
1596void bioset_free(struct bio_set *bs)
1597{
1598 if (bs->bio_pool)
1599 mempool_destroy(bs->bio_pool);
1600
1601 bioset_integrity_free(bs);
1602 biovec_free_pools(bs);
1603 bio_put_slab(bs);
1604
1605 kfree(bs);
1606}
1607EXPORT_SYMBOL(bioset_free);
1608
1609/**
1610 * bioset_create - Create a bio_set
1611 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1612 * @front_pad: Number of bytes to allocate in front of the returned bio
1613 *
1614 * Description:
1615 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1616 * to ask for a number of bytes to be allocated in front of the bio.
1617 * Front pad allocation is useful for embedding the bio inside
1618 * another structure, to avoid allocating extra data to go with the bio.
1619 * Note that the bio must always be embedded at the END of that structure,
1620 * or things will break badly.
1621 */
1622struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1623{
1624 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1625 struct bio_set *bs;
1626
1627 bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1628 if (!bs)
1629 return NULL;
1630
1631 bs->front_pad = front_pad;
1632
1633 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1634 if (!bs->bio_slab) {
1635 kfree(bs);
1636 return NULL;
1637 }
1638
1639 bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1640 if (!bs->bio_pool)
1641 goto bad;
1642
1643 if (!biovec_create_pools(bs, pool_size))
1644 return bs;
1645
1646bad:
1647 bioset_free(bs);
1648 return NULL;
1649}
1650EXPORT_SYMBOL(bioset_create);
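
/*
 * Illustrative sketch (editor's addition): a driver embedding the bio at the
 * end of its own per-request structure via @front_pad, so a single mempool
 * allocation covers both (names are hypothetical):
 *
 *	struct example_io {
 *		struct example_dev *dev;
 *		struct bio bio;		must be the last member
 *	};
 *
 *	bs = bioset_create(64, offsetof(struct example_io, bio));
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *	io = container_of(bio, struct example_io, bio);
 */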
1651
1652#ifdef CONFIG_BLK_CGROUP
1653/**
1654 * bio_associate_current - associate a bio with %current
1655 * @bio: target bio
1656 *
1657 * Associate @bio with %current if it hasn't been associated yet. Block
1658 * layer will treat @bio as if it were issued by %current no matter which
1659 * task actually issues it.
1660 *
1661 * This function takes an extra reference of %current's io_context and blkcg
1662 * which will be put when @bio is released. The caller must own @bio,
1663 * ensure %current->io_context exists, and is responsible for synchronizing
1664 * calls to this function.
1665 */
1666int bio_associate_current(struct bio *bio)
1667{
1668 struct io_context *ioc;
1669 struct cgroup_subsys_state *css;
1670
1671 if (bio->bi_ioc)
1672 return -EBUSY;
1673
1674 ioc = current->io_context;
1675 if (!ioc)
1676 return -ENOENT;
1677
1678 /* acquire active ref on @ioc and associate */
1679 get_io_context_active(ioc);
1680 bio->bi_ioc = ioc;
1681
1682 /* associate blkcg if exists */
1683 rcu_read_lock();
1684 css = task_subsys_state(current, blkio_subsys_id);
1685 if (css && css_tryget(css))
1686 bio->bi_css = css;
1687 rcu_read_unlock();
1688
1689 return 0;
1690}
1691
1692/**
1693 * bio_disassociate_task - undo bio_associate_current()
1694 * @bio: target bio
1695 */
1696void bio_disassociate_task(struct bio *bio)
1697{
1698 if (bio->bi_ioc) {
1699 put_io_context(bio->bi_ioc);
1700 bio->bi_ioc = NULL;
1701 }
1702 if (bio->bi_css) {
1703 css_put(bio->bi_css);
1704 bio->bi_css = NULL;
1705 }
1706}
1707
1708#endif /* CONFIG_BLK_CGROUP */
1709
1710static void __init biovec_init_slabs(void)
1711{
1712 int i;
1713
1714 for (i = 0; i < BIOVEC_NR_POOLS; i++) {
1715 int size;
1716 struct biovec_slab *bvs = bvec_slabs + i;
1717
1718 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
1719 bvs->slab = NULL;
1720 continue;
1721 }
1722
1723 size = bvs->nr_vecs * sizeof(struct bio_vec);
1724 bvs->slab = kmem_cache_create(bvs->name, size, 0,
1725 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1726 }
1727}
1728
1729static int __init init_bio(void)
1730{
1731 bio_slab_max = 2;
1732 bio_slab_nr = 0;
1733 bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
1734 if (!bio_slabs)
1735 panic("bio: can't allocate bios\n");
1736
1737 bio_integrity_init();
1738 biovec_init_slabs();
1739
1740 fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
1741 if (!fs_bio_set)
1742 panic("bio: can't allocate bios\n");
1743
1744 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
1745 panic("bio: can't create integrity pool\n");
1746
1747 bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
1748 sizeof(struct bio_pair));
1749 if (!bio_split_pool)
1750 panic("bio: can't create split pool\n");
1751
1752 return 0;
1753}
1754subsys_initcall(init_bio);