v3.1
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static mempool_t *page_pool, *isa_page_pool;

#ifdef CONFIG_HIGHMEM
static __init int init_emergency_pool(void)
{
	struct sysinfo i;
	si_meminfo(&i);
	si_swapinfo(&i);

	if (!i.totalhigh)
		return 0;

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);

/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto, KM_BOUNCE_READ);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}
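
/*
 * Usage sketch, not part of the original file: the pools created above
 * are consumed through the generic mempool API later in this file.
 * mempool_alloc() falls back to the pre-allocated reserve when the page
 * allocator cannot satisfy the request, which is what lets the bounce
 * path make forward progress under memory pressure:
 *
 *	struct page *bounce = mempool_alloc(page_pool, GFP_NOIO);
 *	...
 *	mempool_free(bounce, page_pool);
 */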

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always, it will do the Right Thing
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	__bio_for_each_segment(tovec, to, i, 0) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy,
		 * bounce_copy_vec already uses tovec->bv_len
		 */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		bounce_copy_vec(tovec, vfrom);
		flush_dcache_page(tovec->bv_page);
	}
}
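
/*
 * Note, not in the original file: __bio_for_each_segment(tovec, to, i, 0)
 * starts at biovec index 0 rather than at bi_idx, so the loop above
 * copies back every segment of the bounced bio, including segments the
 * block layer has already advanced past.
 */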

static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	__bio_for_each_segment(bvec, bio, i, 0) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, err);
	bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio, int err)
{
	bounce_end_io(bio, page_pool, err);
}

static void bounce_end_io_write_isa(struct bio *bio, int err)
{

	bounce_end_io(bio, isa_page_pool, err);
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static void bounce_end_io_read(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, page_pool, err);
}

static void bounce_end_io_read_isa(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, isa_page_pool, err);
}

static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool)
{
	struct page *page;
	struct bio *bio = NULL;
	int i, rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;

	bio_for_each_segment(from, *bio_orig, i) {
		page = from->bv_page;

		/*
		 * is destination page below bounce pfn?
		 */
		if (page_to_pfn(page) <= queue_bounce_pfn(q))
			continue;

		/*
		 * irk, bounce it
		 */
		if (!bio) {
			unsigned int cnt = (*bio_orig)->bi_vcnt;

			bio = bio_alloc(GFP_NOIO, cnt);
			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
		}

		to = bio->bi_io_vec + i;

		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
		to->bv_len = from->bv_len;
		to->bv_offset = from->bv_offset;
		inc_zone_page_state(to->bv_page, NR_BOUNCE);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(from->bv_page);
			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap(from->bv_page) + from->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap(from->bv_page);
		}
	}

	/*
	 * no pages bounced
	 */
	if (!bio)
		return;

	trace_block_bio_bounce(q, *bio_orig);

	/*
	 * at least one page was bounced, fill in possible non-highmem
	 * pages
	 */
	__bio_for_each_segment(from, *bio_orig, i, 0) {
		to = bio_iovec_idx(bio, i);
		if (!to->bv_page) {
			to->bv_page = from->bv_page;
			to->bv_len = from->bv_len;
			to->bv_offset = from->bv_offset;
		}
	}

	bio->bi_bdev = (*bio_orig)->bi_bdev;
	bio->bi_flags |= (1 << BIO_BOUNCED);
	bio->bi_sector = (*bio_orig)->bi_sector;
	bio->bi_rw = (*bio_orig)->bi_rw;

	bio->bi_vcnt = (*bio_orig)->bi_vcnt;
	bio->bi_idx = (*bio_orig)->bi_idx;
	bio->bi_size = (*bio_orig)->bi_size;

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool);
}

EXPORT_SYMBOL(blk_queue_bounce);
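
The code above only rebuilds a bio when a driver has lowered the queue's
bounce limit below the top of physical memory. A minimal sketch of that
wiring, assuming a hypothetical driver: blk_queue_bounce_limit(),
blk_init_queue() and BLK_BOUNCE_HIGH are the real APIs of this era, while
my_dev, my_request_fn and my_dev_init_queue are illustrative names only.

	#include <linux/blkdev.h>

	static int my_dev_init_queue(struct my_dev *dev)
	{
		struct request_queue *q = blk_init_queue(my_request_fn,
							 &dev->lock);
		if (!q)
			return -ENOMEM;

		/*
		 * The device cannot DMA into highmem: any bio segment whose
		 * page sits above the resulting bounce pfn will be rebuilt
		 * by blk_queue_bounce() with pages from the emergency pool.
		 */
		blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

		dev->queue = q;
		return 0;
	}

The bio submission path in block/blk-core.c then calls blk_queue_bounce() on
each bio before it reaches the driver.
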
v3.15
/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk("bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always, it will do the Right Thing
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec tovec, *fromvec = from->bi_io_vec;
	struct bvec_iter iter;

	bio_for_each_segment(tovec, to, iter) {
		if (tovec.bv_page != fromvec->bv_page) {
			/*
			 * fromvec->bv_offset and fromvec->bv_len might have
			 * been modified by the block layer, so use the original
			 * copy, bounce_copy_vec already uses tovec->bv_len
			 */
			vfrom = page_address(fromvec->bv_page) +
				tovec.bv_offset;

			bounce_copy_vec(&tovec, vfrom);
			flush_dcache_page(tovec.bv_page);
		}

		fromvec++;
	}
}
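
/*
 * Sketch, not in the original file: with the immutable-biovec work that
 * landed in 3.14, bio_for_each_segment() yields a struct bio_vec by
 * value and keeps its position in a struct bvec_iter, instead of
 * handing out pointers indexed from bi_io_vec as older kernels did:
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		pr_debug("segment: offset %u, len %u\n",
 *			 bv.bv_offset, bv.bv_len);
 *
 * That is why copy_to_high_bio_irq() above walks the bounced bio with
 * an iterator while stepping the original bio's bi_io_vec by hand via
 * fromvec++.
 */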

static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, err);
	bio_put(bio);
}

static void bounce_end_io_write(struct bio *bio, int err)
{
	bounce_end_io(bio, page_pool, err);
}

static void bounce_end_io_write_isa(struct bio *bio, int err)
{

	bounce_end_io(bio, isa_page_pool, err);
}

static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}

static void bounce_end_io_read(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, page_pool, err);
}

static void bounce_end_io_read_isa(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, isa_page_pool, err);
}

#ifdef CONFIG_NEED_BOUNCE_POOL
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	if (bio_data_dir(bio) != WRITE)
		return 0;

	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
		return 0;

	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
}
#else
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif /* CONFIG_NEED_BOUNCE_POOL */

static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool, int force)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, from;
	struct bvec_iter iter;
	unsigned i;

	if (force)
		goto bounce;
	bio_for_each_segment(from, *bio_orig, iter)
		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
			goto bounce;

	return;
bounce:
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
			continue;

		inc_zone_page_state(to->bv_page, NR_BOUNCE);
		to->bv_page = mempool_alloc(pool, q->bounce_gfp);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}

void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	int must_bounce;
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	must_bounce = must_snapshot_stable_pages(q, *bio_orig);

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
}

EXPORT_SYMBOL(blk_queue_bounce);
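
Compared with v3.1, the v3.15 version no longer assembles the bounce bio by
hand; it clones the original with bio_clone_bioset() and intercepts completion,
a standard stacking pattern in the block layer. A reduced sketch of that
pattern against the 3.15 API, where my_end_io and my_submit_prep are
illustrative names, not kernel symbols:

	#include <linux/bio.h>

	/* Completion handler: finish the original bio, then drop the clone. */
	static void my_end_io(struct bio *clone, int err)
	{
		struct bio *orig = clone->bi_private;

		/* ... undo per-segment changes, e.g. free bounce pages ... */

		bio_endio(orig, err);
		bio_put(clone);
	}

	static void my_submit_prep(struct bio **biop)
	{
		struct bio *clone = bio_clone_bioset(*biop, GFP_NOIO,
						     fs_bio_set);

		clone->bi_end_io = my_end_io;
		clone->bi_private = *biop;

		/* callers now submit the clone in place of the original */
		*biop = clone;
	}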