/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static mempool_t *page_pool, *isa_page_pool;

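/*
 * The pools exist so that writeback to a bounce-limited device can always
 * make forward progress: a mempool guarantees that at least POOL_SIZE (or
 * ISA_POOL_SIZE) pages can be obtained even when the page allocator is
 * under heavy pressure.
 */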
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Gets called "every" time someone inits a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. Always
 * kmap it; kmap will do the right thing either way.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec tovec, *fromvec = from->bi_io_vec;
	struct bvec_iter iter;

	bio_for_each_segment(tovec, to, iter) {
		if (tovec.bv_page != fromvec->bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the
			 * original copy; bounce_copy_vec already uses
			 * tovec.bv_len.
			 */
			vfrom = page_address(fromvec->bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}

		fromvec++;
	}
}

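/*
 * Completion handler for a bounced bio: give any substituted pages back to
 * the pool, propagate the completion status, and end the original bio.
 */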
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;
	int start = bio_orig->bi_iter.bi_idx;

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		org_vec = bio_orig->bi_io_vec + i + start;

		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_orig->bi_error = bio->bi_error;
	bio_endio(bio_orig);
	bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio, page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
	bounce_end_io(bio, isa_page_pool);
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_error)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
	__bounce_end_io_read(bio, page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
	__bounce_end_io_read(bio, isa_page_pool);
}

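/*
 * Walk the original bio; if any segment lives above the queue's bounce pfn,
 * clone the bio and replace those pages with pages from the mempool, copying
 * the data across for writes. Completion is redirected through the
 * bounce_end_io_* handlers, which copy the data back for reads and return
 * the pool pages.
 */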
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i;

	bio_for_each_segment(from, *bio_orig, iter)
		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
			goto bounce;

	return;
bounce:
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= queue_bounce_pfn(q))
			continue;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

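	/*
	 * At this point every segment above the bounce pfn points at a pool
	 * page holding a private copy of the data; flag the clone as bounced
	 * and hook up the completion handlers that undo the substitution.
	 */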
	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

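/*
 * Entry point. In kernels of this vintage the block layer calls this (e.g.
 * from blk_queue_bio()) with the bio passed by reference; on return
 * *bio_orig may point at a bounced clone rather than the original bio.
 */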
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * For the non-ISA bounce case, just check whether the bounce pfn is
	 * equal to or bigger than the highest pfn in the system -- in that
	 * case, don't waste time iterating over bio segments.
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);

// SPDX-License-Identifier: GPL-2.0
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
#include "blk.h"
#include "blk-cgroup.h"

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static struct bio_set bounce_bio_set, bounce_bio_split;
static mempool_t page_pool;

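/*
 * Set up the bio_sets used for cloning (bounce_bio_set) and splitting
 * (bounce_bio_split) bounced bios. A static flag guards against running
 * the initialization twice.
 */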
static void init_bounce_bioset(void)
{
	static bool bounce_bs_setup;
	int ret;

	if (bounce_bs_setup)
		return;

	ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
	BUG_ON(ret);
	if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
		BUG_ON(1);

	ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
	BUG_ON(ret);
	bounce_bs_setup = true;
}

static __init int init_emergency_pool(void)
{
	int ret;

#ifndef CONFIG_MEMORY_HOTPLUG
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	ret = mempool_init_page_pool(&page_pool, POOL_SIZE, 0);
	BUG_ON(ret);
	pr_info("pool size: %d pages\n", POOL_SIZE);

	init_bounce_bioset();
	return 0;
}

__initcall(init_emergency_pool);

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. Always
 * kmap it; kmap will do the right thing either way.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	struct bio_vec tovec, fromvec;
	struct bvec_iter iter;
	/*
	 * The bio of @from is created by bounce, so we can iterate
	 * its bvec from start to end, but the @from->bi_iter can't be
	 * trusted because it might be changed by splitting.
	 */
	struct bvec_iter from_iter = BVEC_ITER_ALL_INIT;

	bio_for_each_segment(tovec, to, iter) {
		fromvec = bio_iter_iovec(from, from_iter);
		if (tovec.bv_page != fromvec.bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the
			 * original copy; memcpy_to_bvec() already uses
			 * tovec.bv_len.
			 */
			memcpy_to_bvec(&tovec, page_address(fromvec.bv_page) +
				       tovec.bv_offset);
		}
		bio_advance_iter(from, &from_iter, tovec.bv_len);
	}
}

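/*
 * Completion handler for a bounced bio. The original bio may have been
 * advanced or split, so walk its segments starting from bi_iter in lockstep
 * with the clone's to find which pages came from the pool.
 */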
static void bounce_end_io(struct bio *bio)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, orig_vec;
	struct bvec_iter orig_iter = bio_orig->bi_iter;
	struct bvec_iter_all iter_all;

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, iter_all) {
		orig_vec = bio_iter_iovec(bio_orig, orig_iter);
		if (bvec->bv_page != orig_vec.bv_page) {
			dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
			mempool_free(bvec->bv_page, &page_pool);
		}
		bio_advance_iter(bio_orig, &orig_iter, orig_vec.bv_len);
	}

	bio_orig->bi_status = bio->bi_status;
	bio_endio(bio_orig);
	bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio)
{
	bounce_end_io(bio);
}

static void bounce_end_io_read(struct bio *bio)
{
	struct bio *bio_orig = bio->bi_private;

	if (!bio->bi_status)
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio);
}

static struct bio *bounce_clone_bio(struct bio *bio_src)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_VECS biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_VECS.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work.
	 */
	bio = bio_alloc_bioset(bio_src->bi_bdev, bio_segments(bio_src),
			       bio_src->bi_opf, GFP_NOIO, &bounce_bio_set);
	if (bio_flagged(bio_src, BIO_REMAPPED))
		bio_set_flag(bio, BIO_REMAPPED);
	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_crypt_clone(bio, bio_src, GFP_NOIO) < 0)
		goto err_put;

	if (bio_integrity(bio_src) &&
	    bio_integrity_clone(bio, bio_src, GFP_NOIO) < 0)
		goto err_put;

	bio_clone_blkg_association(bio, bio_src);

	return bio;

err_put:
	bio_put(bio);
	return NULL;
}

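/*
 * Main bounce path: scan the bio for highmem segments and, if none are
 * found, hand back the original bio untouched. Otherwise split the bio so
 * that the clone fits in BIO_MAX_VECS bvecs, clone it, and swap every
 * highmem page for a page from the emergency pool (copying the data across
 * for writes).
 */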
struct bio *__blk_queue_bounce(struct bio *bio_orig, struct request_queue *q)
{
	struct bio *bio;
	int rw = bio_data_dir(bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i = 0, bytes = 0;
	bool bounce = false;
	int sectors;

	bio_for_each_segment(from, bio_orig, iter) {
		if (i++ < BIO_MAX_VECS)
			bytes += from.bv_len;
		if (PageHighMem(from.bv_page))
			bounce = true;
	}
	if (!bounce)
		return bio_orig;

	/*
	 * Individual bvecs might not be logical block aligned. Round down
	 * the split size so that each bio is properly block size aligned,
	 * even if we do not use the full hardware limits.
	 */
	sectors = ALIGN_DOWN(bytes, queue_logical_block_size(q)) >>
			SECTOR_SHIFT;
	if (sectors < bio_sectors(bio_orig)) {
		bio = bio_split(bio_orig, sectors, GFP_NOIO, &bounce_bio_split);
		bio_chain(bio, bio_orig);
		submit_bio_noacct(bio_orig);
		bio_orig = bio;
	}
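	/*
	 * Clone the (possibly shortened) bio. The clone's bvec table still
	 * points at the original pages; the highmem ones are swapped for
	 * bounce pages below.
	 */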
	bio = bounce_clone_bio(bio_orig);

	/*
	 * The bvec table can't be updated by bio_for_each_segment_all(),
	 * so retrieve the bvecs from the table directly. This is safe
	 * because the clone's bvec table contains only single-page bvecs.
	 */
	for (i = 0, to = bio->bi_io_vec; i < bio->bi_vcnt; to++, i++) {
		struct page *bounce_page;

		if (!PageHighMem(to->bv_page))
			continue;

		bounce_page = mempool_alloc(&page_pool, GFP_NOIO);
		inc_zone_page_state(bounce_page, NR_BOUNCE);

		if (rw == WRITE) {
			flush_dcache_page(to->bv_page);
			memcpy_from_bvec(page_address(bounce_page), to);
		}
		to->bv_page = bounce_page;
	}

	trace_block_bio_bounce(bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (rw == READ)
		bio->bi_end_io = bounce_end_io_read;
	else
		bio->bi_end_io = bounce_end_io_write;

	bio->bi_private = bio_orig;
	return bio;
}
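
/*
 * For context (not part of this file): in kernels of this vintage the entry
 * point is an inline wrapper in block/blk.h; a rough sketch of its shape is
 * below (the exact gating predicate lives in blk_queue_may_bounce()):
 *
 *	static inline struct bio *blk_queue_bounce(struct bio *bio,
 *			struct request_queue *q)
 *	{
 *		if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
 *			return __blk_queue_bounce(bio, q);
 *		return bio;
 *	}
 */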