drivers/md/dm-io.c (v6.13.7)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2003 Sistina Software
  4 * Copyright (C) 2006 Red Hat GmbH
  5 *
  6 * This file is released under the GPL.
  7 */
  8
  9#include "dm-core.h"
 10
 11#include <linux/device-mapper.h>
 12
 13#include <linux/bio.h>
 14#include <linux/completion.h>
 15#include <linux/mempool.h>
 16#include <linux/module.h>
 17#include <linux/sched.h>
 18#include <linux/slab.h>
 19#include <linux/dm-io.h>
 20
 21#define DM_MSG_PREFIX "io"
 22
 23#define DM_IO_MAX_REGIONS	BITS_PER_LONG
 24
 25struct dm_io_client {
 26	mempool_t pool;
 27	struct bio_set bios;
 28};
 29
 30/*
 31 * Aligning 'struct io' reduces the number of bits required to store
 32 * its address.  Refer to store_io_and_region_in_bio() below.
 33 */
 34struct io {
 35	unsigned long error_bits;
 36	atomic_t count;
 37	struct dm_io_client *client;
 38	io_notify_fn callback;
 39	void *context;
 40	void *vma_invalidate_address;
 41	unsigned long vma_invalidate_size;
 42} __aligned(DM_IO_MAX_REGIONS);
 43
 44static struct kmem_cache *_dm_io_cache;
 45
 46/*
 47 * Create a client with mempool and bioset.
 48 */
 49struct dm_io_client *dm_io_client_create(void)
 50{
 51	struct dm_io_client *client;
 52	unsigned int min_ios = dm_get_reserved_bio_based_ios();
 53	int ret;
 54
 55	client = kzalloc(sizeof(*client), GFP_KERNEL);
 56	if (!client)
 57		return ERR_PTR(-ENOMEM);
 58
 59	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
 60	if (ret)
 61		goto bad;
 62
 63	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
 64	if (ret)
 65		goto bad;
 66
 67	return client;
 68
 69bad:
 70	mempool_exit(&client->pool);
 71	kfree(client);
 72	return ERR_PTR(ret);
 73}
 74EXPORT_SYMBOL(dm_io_client_create);
 75
 76void dm_io_client_destroy(struct dm_io_client *client)
 77{
 78	mempool_exit(&client->pool);
 79	bioset_exit(&client->bios);
 80	kfree(client);
 81}
 82EXPORT_SYMBOL(dm_io_client_destroy);
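/*
 * Illustrative sketch: a target would typically create one client in its
 * ctr and destroy it again in its dtr, along the lines of
 *
 *	ioc = dm_io_client_create();
 *	if (IS_ERR(ioc))
 *		return PTR_ERR(ioc);
 *	...
 *	dm_io_client_destroy(ioc);
 *
 * where 'ioc' is a hypothetical struct dm_io_client pointer kept in the
 * target's private context.
 */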
 83
 84/*
 85 *-------------------------------------------------------------------
 86 * We need to keep track of which region a bio is doing io for.
 87 * To avoid a memory allocation to store just 5 or 6 bits, we
 88 * ensure the 'struct io' pointer is aligned so enough low bits are
 89 * always zero and then combine it with the region number directly in
 90 * bi_private.
 91 *-------------------------------------------------------------------
 92 */
 93static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
 94				       unsigned int region)
 95{
 96	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
 97		DMCRIT("Unaligned struct io pointer %p", io);
 98		BUG();
 99	}
100
101	bio->bi_private = (void *)((unsigned long)io | region);
102}
103
104static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
105				       unsigned int *region)
106{
107	unsigned long val = (unsigned long)bio->bi_private;
108
109	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
110	*region = val & (DM_IO_MAX_REGIONS - 1);
111}
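/*
 * Worked example (illustrative): with DM_IO_MAX_REGIONS == 64 on a 64-bit
 * build, a 'struct io' at, say, 0x...9d40 has its six low bits clear, so
 * region 5 is stored as bi_private == 0x...9d45.  Masking with -64
 * (i.e. ~63UL) and with 63 then recovers the pointer and the region.
 */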
112
113/*
114 *--------------------------------------------------------------
115 * We need an io object to keep track of the number of bios that
116 * have been dispatched for a particular io.
117 *--------------------------------------------------------------
118 */
119static void complete_io(struct io *io)
120{
121	unsigned long error_bits = io->error_bits;
122	io_notify_fn fn = io->callback;
123	void *context = io->context;
124
125	if (io->vma_invalidate_size)
126		invalidate_kernel_vmap_range(io->vma_invalidate_address,
127					     io->vma_invalidate_size);
128
129	mempool_free(io, &io->client->pool);
130	fn(error_bits, context);
131}
132
133static void dec_count(struct io *io, unsigned int region, blk_status_t error)
134{
135	if (error)
136		set_bit(region, &io->error_bits);
137
138	if (atomic_dec_and_test(&io->count))
139		complete_io(io);
140}
141
142static void endio(struct bio *bio)
143{
144	struct io *io;
145	unsigned int region;
146	blk_status_t error;
147
148	if (bio->bi_status && bio_data_dir(bio) == READ)
149		zero_fill_bio(bio);
150
151	/*
152	 * The bio destructor in bio_put() may use the io object.
153	 */
154	retrieve_io_and_region_from_bio(bio, &io, &region);
155
156	error = bio->bi_status;
157	bio_put(bio);
158
159	dec_count(io, region, error);
160}
161
162/*
163 *--------------------------------------------------------------
164 * These little objects provide an abstraction for getting a new
165 * destination page for io.
166 *--------------------------------------------------------------
167 */
168struct dpages {
169	void (*get_page)(struct dpages *dp,
170			 struct page **p, unsigned long *len, unsigned int *offset);
171	void (*next_page)(struct dpages *dp);
172
173	union {
174		unsigned int context_u;
175		struct bvec_iter context_bi;
176	};
177	void *context_ptr;
178
179	void *vma_invalidate_address;
180	unsigned long vma_invalidate_size;
181};
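/*
 * Illustrative note: each *_dp_init() variant below just fills in
 * get_page/next_page plus its own context, and the consumer (do_region())
 * always drives them the same way:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	len = min(len, to_bytes(remaining));
 *	...add the page to a bio...
 *	dp->next_page(dp);
 */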
182
183/*
184 * Functions for getting the pages from a list.
185 */
186static void list_get_page(struct dpages *dp,
187		  struct page **p, unsigned long *len, unsigned int *offset)
188{
189	unsigned int o = dp->context_u;
190	struct page_list *pl = dp->context_ptr;
191
192	*p = pl->page;
193	*len = PAGE_SIZE - o;
194	*offset = o;
195}
196
197static void list_next_page(struct dpages *dp)
198{
199	struct page_list *pl = dp->context_ptr;
200
201	dp->context_ptr = pl->next;
202	dp->context_u = 0;
203}
204
205static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
206{
207	dp->get_page = list_get_page;
208	dp->next_page = list_next_page;
209	dp->context_u = offset;
210	dp->context_ptr = pl;
211}
212
213/*
214 * Functions for getting the pages from a bvec.
215 */
216static void bio_get_page(struct dpages *dp, struct page **p,
217			 unsigned long *len, unsigned int *offset)
218{
219	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
220					     dp->context_bi);
221
222	*p = bvec.bv_page;
223	*len = bvec.bv_len;
224	*offset = bvec.bv_offset;
225
226	/* avoid figuring it out again in bio_next_page() */
227	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
228}
229
230static void bio_next_page(struct dpages *dp)
231{
232	unsigned int len = (unsigned int)dp->context_bi.bi_sector;
233
234	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
235			  &dp->context_bi, len);
236}
237
238static void bio_dp_init(struct dpages *dp, struct bio *bio)
239{
240	dp->get_page = bio_get_page;
241	dp->next_page = bio_next_page;
242
243	/*
244	 * We just use bvec iterator to retrieve pages, so it is ok to
245	 * access the bvec table directly here
246	 */
247	dp->context_ptr = bio->bi_io_vec;
248	dp->context_bi = bio->bi_iter;
249}
250
251/*
252 * Functions for getting the pages from a VMA.
253 */
254static void vm_get_page(struct dpages *dp,
255		 struct page **p, unsigned long *len, unsigned int *offset)
256{
257	*p = vmalloc_to_page(dp->context_ptr);
258	*offset = dp->context_u;
259	*len = PAGE_SIZE - dp->context_u;
260}
261
262static void vm_next_page(struct dpages *dp)
263{
264	dp->context_ptr += PAGE_SIZE - dp->context_u;
265	dp->context_u = 0;
266}
267
268static void vm_dp_init(struct dpages *dp, void *data)
269{
270	dp->get_page = vm_get_page;
271	dp->next_page = vm_next_page;
272	dp->context_u = offset_in_page(data);
273	dp->context_ptr = data;
274}
275
276/*
277 * Functions for getting the pages from kernel memory.
278 */
279static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
280			unsigned int *offset)
281{
282	*p = virt_to_page(dp->context_ptr);
283	*offset = dp->context_u;
284	*len = PAGE_SIZE - dp->context_u;
285}
286
287static void km_next_page(struct dpages *dp)
288{
289	dp->context_ptr += PAGE_SIZE - dp->context_u;
290	dp->context_u = 0;
291}
292
293static void km_dp_init(struct dpages *dp, void *data)
294{
295	dp->get_page = km_get_page;
296	dp->next_page = km_next_page;
297	dp->context_u = offset_in_page(data);
298	dp->context_ptr = data;
299}
300
301/*
302 *---------------------------------------------------------------
303 * IO routines that accept a list of pages.
304 *---------------------------------------------------------------
305 */
306static void do_region(const blk_opf_t opf, unsigned int region,
307		      struct dm_io_region *where, struct dpages *dp,
308		      struct io *io, unsigned short ioprio)
309{
310	struct bio *bio;
311	struct page *page;
312	unsigned long len;
313	unsigned int offset;
314	unsigned int num_bvecs;
315	sector_t remaining = where->count;
316	struct request_queue *q = bdev_get_queue(where->bdev);
317	sector_t num_sectors;
318	unsigned int special_cmd_max_sectors;
319	const enum req_op op = opf & REQ_OP_MASK;
320
321	/*
322	 * Reject unsupported discard and write zeroes requests.
323	 */
324	if (op == REQ_OP_DISCARD)
325		special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
326	else if (op == REQ_OP_WRITE_ZEROES)
327		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
328	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
329	    special_cmd_max_sectors == 0) {
330		atomic_inc(&io->count);
331		dec_count(io, region, BLK_STS_NOTSUPP);
332		return;
333	}
334
335	/*
336	 * where->count may be zero if op holds a flush and we need to
337	 * send a zero-sized flush.
338	 */
339	do {
340		/*
341		 * Allocate a suitably sized bio.
342		 */
343		switch (op) {
344		case REQ_OP_DISCARD:
345		case REQ_OP_WRITE_ZEROES:
346			num_bvecs = 0;
347			break;
348		default:
349			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
350						(PAGE_SIZE >> SECTOR_SHIFT)) + 1);
351		}
352
353		bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
354				       &io->client->bios);
355		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
356		bio->bi_end_io = endio;
357		bio->bi_ioprio = ioprio;
358		store_io_and_region_in_bio(bio, io, region);
359
360		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
361			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
362			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
363			remaining -= num_sectors;
364		} else {
365			while (remaining) {
366				/*
367				 * Try and add as many pages as possible.
368				 */
369				dp->get_page(dp, &page, &len, &offset);
370				len = min(len, to_bytes(remaining));
371				if (!bio_add_page(bio, page, len, offset))
372					break;
373
374				offset = 0;
375				remaining -= to_sector(len);
376				dp->next_page(dp);
377			}
378		}
379
380		atomic_inc(&io->count);
381		submit_bio(bio);
382	} while (remaining);
383}
384
385static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
386			struct dm_io_region *where, struct dpages *dp,
387			struct io *io, unsigned short ioprio)
388{
389	int i;
390	struct dpages old_pages = *dp;
391
392	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
393
394	/*
395	 * For multiple regions we need to be careful to rewind
396	 * the dp object for each call to do_region.
397	 */
398	for (i = 0; i < num_regions; i++) {
399		*dp = old_pages;
400		if (where[i].count || (opf & REQ_PREFLUSH))
401			do_region(opf, i, where + i, dp, io, ioprio);
402	}
403
404	/*
405	 * Drop the extra reference that we were holding to avoid
406	 * the io being completed too early.
407	 */
408	dec_count(io, 0, 0);
409}
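/*
 * Reference counting summary: io->count starts at 1 (set by async_io()),
 * do_region() takes one further reference per bio it submits, endio()
 * drops that reference when the bio completes, and the dec_count() just
 * above releases the initial reference so the io cannot be completed
 * before every region has been dispatched.
 */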
410
411static void async_io(struct dm_io_client *client, unsigned int num_regions,
412		     struct dm_io_region *where, blk_opf_t opf,
413		     struct dpages *dp, io_notify_fn fn, void *context,
414		     unsigned short ioprio)
415{
416	struct io *io;
417
418	io = mempool_alloc(&client->pool, GFP_NOIO);
419	io->error_bits = 0;
420	atomic_set(&io->count, 1); /* see dispatch_io() */
421	io->client = client;
422	io->callback = fn;
423	io->context = context;
424
425	io->vma_invalidate_address = dp->vma_invalidate_address;
426	io->vma_invalidate_size = dp->vma_invalidate_size;
427
428	dispatch_io(opf, num_regions, where, dp, io, ioprio);
429}
430
431struct sync_io {
432	unsigned long error_bits;
433	struct completion wait;
434};
435
436static void sync_io_complete(unsigned long error, void *context)
437{
438	struct sync_io *sio = context;
439
440	sio->error_bits = error;
441	complete(&sio->wait);
442}
443
444static int sync_io(struct dm_io_client *client, unsigned int num_regions,
445		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
446		   unsigned long *error_bits, unsigned short ioprio)
447{
448	struct sync_io sio;
449
450	init_completion(&sio.wait);
451
452	async_io(client, num_regions, where, opf | REQ_SYNC, dp,
453		 sync_io_complete, &sio, ioprio);
454
455	wait_for_completion_io(&sio.wait);
456
457	if (error_bits)
458		*error_bits = sio.error_bits;
459
460	return sio.error_bits ? -EIO : 0;
461}
462
463static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
464		   unsigned long size)
465{
466	/* Set up dpages based on memory type */
467
468	dp->vma_invalidate_address = NULL;
469	dp->vma_invalidate_size = 0;
470
471	switch (io_req->mem.type) {
472	case DM_IO_PAGE_LIST:
473		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
474		break;
475
476	case DM_IO_BIO:
477		bio_dp_init(dp, io_req->mem.ptr.bio);
478		break;
479
480	case DM_IO_VMA:
481		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
482		if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
483			dp->vma_invalidate_address = io_req->mem.ptr.vma;
484			dp->vma_invalidate_size = size;
485		}
486		vm_dp_init(dp, io_req->mem.ptr.vma);
487		break;
488
489	case DM_IO_KMEM:
490		km_dp_init(dp, io_req->mem.ptr.addr);
491		break;
492
493	default:
494		return -EINVAL;
495	}
496
497	return 0;
498}
499
500int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
501	  struct dm_io_region *where, unsigned long *sync_error_bits,
502	  unsigned short ioprio)
503{
504	int r;
505	struct dpages dp;
506
507	if (num_regions > 1 && !op_is_write(io_req->bi_opf)) {
508		WARN_ON(1);
509		return -EIO;
510	}
511
512	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
513	if (r)
514		return r;
515
516	if (!io_req->notify.fn)
517		return sync_io(io_req->client, num_regions, where,
518			       io_req->bi_opf, &dp, sync_error_bits, ioprio);
519
520	async_io(io_req->client, num_regions, where, io_req->bi_opf, &dp,
521		 io_req->notify.fn, io_req->notify.context, ioprio);
522	return 0;
523}
524EXPORT_SYMBOL(dm_io);
525
526int __init dm_io_init(void)
527{
528	_dm_io_cache = KMEM_CACHE(io, 0);
529	if (!_dm_io_cache)
530		return -ENOMEM;
531
532	return 0;
533}
534
535void dm_io_exit(void)
536{
537	kmem_cache_destroy(_dm_io_cache);
538	_dm_io_cache = NULL;
539}
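
For context, a minimal sketch of how a caller might issue a synchronous read through this interface. The names ioc, dev and buf are hypothetical, buf is assumed to be a kmalloc'ed buffer of at least 4 KiB (hence DM_IO_KMEM), and IOPRIO_DEFAULT comes from <linux/ioprio.h>:

	struct dm_io_region where = {
		.bdev	= dev->bdev,	/* hypothetical, already-opened device */
		.sector	= 0,
		.count	= 8,		/* 8 sectors = 4 KiB */
	};
	struct dm_io_request io_req = {
		.bi_opf		= REQ_OP_READ,
		.mem.type	= DM_IO_KMEM,
		.mem.ptr.addr	= buf,
		.notify.fn	= NULL,	/* NULL selects the sync_io() path */
		.client		= ioc,	/* from dm_io_client_create() */
	};
	unsigned long error_bits;
	int r = dm_io(&io_req, 1, &where, &error_bits, IOPRIO_DEFAULT);

Setting notify.fn to a callback instead makes dm_io() return immediately; the callback is then invoked with the per-region error bits once all bios have completed.
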
drivers/md/dm-io.c (v3.1)
 
  1/*
  2 * Copyright (C) 2003 Sistina Software
  3 * Copyright (C) 2006 Red Hat GmbH
  4 *
  5 * This file is released under the GPL.
  6 */
  7
  8#include "dm.h"
  9
 10#include <linux/device-mapper.h>
 11
 12#include <linux/bio.h>
 13#include <linux/mempool.h>
 14#include <linux/module.h>
 15#include <linux/sched.h>
 16#include <linux/slab.h>
 17#include <linux/dm-io.h>
 18
 19#define DM_MSG_PREFIX "io"
 20
 21#define DM_IO_MAX_REGIONS	BITS_PER_LONG
 22#define MIN_IOS		16
 23#define MIN_BIOS	16
 24
 25struct dm_io_client {
 26	mempool_t *pool;
 27	struct bio_set *bios;
 28};
 29
 30/*
 31 * Aligning 'struct io' reduces the number of bits required to store
 32 * its address.  Refer to store_io_and_region_in_bio() below.
 33 */
 34struct io {
 35	unsigned long error_bits;
 36	atomic_t count;
 37	struct task_struct *sleeper;
 38	struct dm_io_client *client;
 39	io_notify_fn callback;
 40	void *context;
 41	void *vma_invalidate_address;
 42	unsigned long vma_invalidate_size;
 43} __attribute__((aligned(DM_IO_MAX_REGIONS)));
 44
 45static struct kmem_cache *_dm_io_cache;
 46
 47/*
 48 * Create a client with mempool and bioset.
 49 */
 50struct dm_io_client *dm_io_client_create(void)
 51{
 52	struct dm_io_client *client;
 53
 54	client = kmalloc(sizeof(*client), GFP_KERNEL);
 55	if (!client)
 56		return ERR_PTR(-ENOMEM);
 57
 58	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
 59	if (!client->pool)
 60		goto bad;
 61
 62	client->bios = bioset_create(MIN_BIOS, 0);
 63	if (!client->bios)
 64		goto bad;
 65
 66	return client;
 67
 68   bad:
 69	if (client->pool)
 70		mempool_destroy(client->pool);
 71	kfree(client);
 72	return ERR_PTR(-ENOMEM);
 73}
 74EXPORT_SYMBOL(dm_io_client_create);
 75
 76void dm_io_client_destroy(struct dm_io_client *client)
 77{
 78	mempool_destroy(client->pool);
 79	bioset_free(client->bios);
 80	kfree(client);
 81}
 82EXPORT_SYMBOL(dm_io_client_destroy);
 83
 84/*-----------------------------------------------------------------
 85 * We need to keep track of which region a bio is doing io for.
 86 * To avoid a memory allocation to store just 5 or 6 bits, we
 87 * ensure the 'struct io' pointer is aligned so enough low bits are
 88 * always zero and then combine it with the region number directly in
 89 * bi_private.
 90 *---------------------------------------------------------------*/
 91static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
 92				       unsigned region)
 93{
 94	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
 95		DMCRIT("Unaligned struct io pointer %p", io);
 96		BUG();
 97	}
 98
 99	bio->bi_private = (void *)((unsigned long)io | region);
100}
101
102static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
103				       unsigned *region)
104{
105	unsigned long val = (unsigned long)bio->bi_private;
106
107	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
108	*region = val & (DM_IO_MAX_REGIONS - 1);
109}
110
111/*-----------------------------------------------------------------
112 * We need an io object to keep track of the number of bios that
113 * have been dispatched for a particular io.
114 *---------------------------------------------------------------*/
115static void dec_count(struct io *io, unsigned int region, int error)
116{
117	if (error)
118		set_bit(region, &io->error_bits);
119
120	if (atomic_dec_and_test(&io->count)) {
121		if (io->vma_invalidate_size)
122			invalidate_kernel_vmap_range(io->vma_invalidate_address,
123						     io->vma_invalidate_size);
124
125		if (io->sleeper)
126			wake_up_process(io->sleeper);
127
128		else {
129			unsigned long r = io->error_bits;
130			io_notify_fn fn = io->callback;
131			void *context = io->context;
132
133			mempool_free(io, io->client->pool);
134			fn(r, context);
135		}
136	}
137}
138
139static void endio(struct bio *bio, int error)
140{
141	struct io *io;
142	unsigned region;
143
144	if (error && bio_data_dir(bio) == READ)
145		zero_fill_bio(bio);
146
147	/*
148	 * The bio destructor in bio_put() may use the io object.
149	 */
150	retrieve_io_and_region_from_bio(bio, &io, &region);
151
152	bio_put(bio);
153
154	dec_count(io, region, error);
155}
156
157/*-----------------------------------------------------------------
158 * These little objects provide an abstraction for getting a new
159 * destination page for io.
160 *---------------------------------------------------------------*/
161struct dpages {
162	void (*get_page)(struct dpages *dp,
163			 struct page **p, unsigned long *len, unsigned *offset);
164	void (*next_page)(struct dpages *dp);
165
166	unsigned context_u;
167	void *context_ptr;
168
169	void *vma_invalidate_address;
170	unsigned long vma_invalidate_size;
171};
172
173/*
174 * Functions for getting the pages from a list.
175 */
176static void list_get_page(struct dpages *dp,
177		  struct page **p, unsigned long *len, unsigned *offset)
178{
179	unsigned o = dp->context_u;
180	struct page_list *pl = (struct page_list *) dp->context_ptr;
181
182	*p = pl->page;
183	*len = PAGE_SIZE - o;
184	*offset = o;
185}
186
187static void list_next_page(struct dpages *dp)
188{
189	struct page_list *pl = (struct page_list *) dp->context_ptr;
190	dp->context_ptr = pl->next;
191	dp->context_u = 0;
192}
193
194static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
195{
196	dp->get_page = list_get_page;
197	dp->next_page = list_next_page;
198	dp->context_u = offset;
199	dp->context_ptr = pl;
200}
201
202/*
203 * Functions for getting the pages from a bvec.
204 */
205static void bvec_get_page(struct dpages *dp,
206		  struct page **p, unsigned long *len, unsigned *offset)
207{
208	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
209	*p = bvec->bv_page;
210	*len = bvec->bv_len;
211	*offset = bvec->bv_offset;
212}
213
214static void bvec_next_page(struct dpages *dp)
215{
216	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
217	dp->context_ptr = bvec + 1;
218}
219
220static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
221{
222	dp->get_page = bvec_get_page;
223	dp->next_page = bvec_next_page;
224	dp->context_ptr = bvec;
225}
226
227/*
228 * Functions for getting the pages from a VMA.
229 */
230static void vm_get_page(struct dpages *dp,
231		 struct page **p, unsigned long *len, unsigned *offset)
232{
233	*p = vmalloc_to_page(dp->context_ptr);
234	*offset = dp->context_u;
235	*len = PAGE_SIZE - dp->context_u;
236}
237
238static void vm_next_page(struct dpages *dp)
239{
240	dp->context_ptr += PAGE_SIZE - dp->context_u;
241	dp->context_u = 0;
242}
243
244static void vm_dp_init(struct dpages *dp, void *data)
245{
246	dp->get_page = vm_get_page;
247	dp->next_page = vm_next_page;
248	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
249	dp->context_ptr = data;
250}
251
252static void dm_bio_destructor(struct bio *bio)
253{
254	unsigned region;
255	struct io *io;
256
257	retrieve_io_and_region_from_bio(bio, &io, &region);
258
259	bio_free(bio, io->client->bios);
260}
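/*
 * Note: in this kernel version a bio allocated from a private bio_set
 * appears to need an explicit bi_destructor to hand it back via
 * bio_free(); the bio does not record which bio_set it came from, which
 * is why the io/region cookie stored in bi_private is used here to reach
 * the client's bioset.
 */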
261
262/*
263 * Functions for getting the pages from kernel memory.
264 */
265static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
266			unsigned *offset)
267{
268	*p = virt_to_page(dp->context_ptr);
269	*offset = dp->context_u;
270	*len = PAGE_SIZE - dp->context_u;
271}
272
273static void km_next_page(struct dpages *dp)
274{
275	dp->context_ptr += PAGE_SIZE - dp->context_u;
276	dp->context_u = 0;
277}
278
279static void km_dp_init(struct dpages *dp, void *data)
280{
281	dp->get_page = km_get_page;
282	dp->next_page = km_next_page;
283	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
284	dp->context_ptr = data;
285}
286
287/*-----------------------------------------------------------------
288 * IO routines that accept a list of pages.
289 *---------------------------------------------------------------*/
290static void do_region(int rw, unsigned region, struct dm_io_region *where,
291		      struct dpages *dp, struct io *io)
292{
293	struct bio *bio;
294	struct page *page;
295	unsigned long len;
296	unsigned offset;
297	unsigned num_bvecs;
298	sector_t remaining = where->count;
299
300	/*
301	 * where->count may be zero if rw holds a flush and we need to
302	 * send a zero-sized flush.
303	 */
304	do {
305		/*
306		 * Allocate a suitably sized bio.
307		 */
308		num_bvecs = dm_sector_div_up(remaining,
309					     (PAGE_SIZE >> SECTOR_SHIFT));
310		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
311		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
312		bio->bi_sector = where->sector + (where->count - remaining);
313		bio->bi_bdev = where->bdev;
314		bio->bi_end_io = endio;
315		bio->bi_destructor = dm_bio_destructor;
316		store_io_and_region_in_bio(bio, io, region);
317
318		/*
319		 * Try and add as many pages as possible.
320		 */
321		while (remaining) {
322			dp->get_page(dp, &page, &len, &offset);
323			len = min(len, to_bytes(remaining));
324			if (!bio_add_page(bio, page, len, offset))
325				break;
326
327			offset = 0;
328			remaining -= to_sector(len);
329			dp->next_page(dp);
330		}
331
332		atomic_inc(&io->count);
333		submit_bio(rw, bio);
334	} while (remaining);
335}
336
337static void dispatch_io(int rw, unsigned int num_regions,
338			struct dm_io_region *where, struct dpages *dp,
339			struct io *io, int sync)
340{
341	int i;
342	struct dpages old_pages = *dp;
343
344	BUG_ON(num_regions > DM_IO_MAX_REGIONS);
345
346	if (sync)
347		rw |= REQ_SYNC;
348
349	/*
350	 * For multiple regions we need to be careful to rewind
351	 * the dp object for each call to do_region.
352	 */
353	for (i = 0; i < num_regions; i++) {
354		*dp = old_pages;
355		if (where[i].count || (rw & REQ_FLUSH))
356			do_region(rw, i, where + i, dp, io);
357	}
358
359	/*
360	 * Drop the extra reference that we were holding to avoid
361	 * the io being completed too early.
362	 */
363	dec_count(io, 0, 0);
364}
365
366static int sync_io(struct dm_io_client *client, unsigned int num_regions,
367		   struct dm_io_region *where, int rw, struct dpages *dp,
368		   unsigned long *error_bits)
369{
370	/*
371	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
372	 * align it on our own.
373	 * volatile prevents the optimizer from removing or reusing
374	 * "io_" field from the stack frame (allowed in ANSI C).
375	 */
376	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
377	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
378
379	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
380		WARN_ON(1);
381		return -EIO;
382	}
383
384	io->error_bits = 0;
385	atomic_set(&io->count, 1); /* see dispatch_io() */
386	io->sleeper = current;
387	io->client = client;
388
389	io->vma_invalidate_address = dp->vma_invalidate_address;
390	io->vma_invalidate_size = dp->vma_invalidate_size;
391
392	dispatch_io(rw, num_regions, where, dp, io, 1);
393
394	while (1) {
395		set_current_state(TASK_UNINTERRUPTIBLE);
396
397		if (!atomic_read(&io->count))
398			break;
399
400		io_schedule();
401	}
402	set_current_state(TASK_RUNNING);
403
404	if (error_bits)
405		*error_bits = io->error_bits;
406
407	return io->error_bits ? -EIO : 0;
408}
409
410static int async_io(struct dm_io_client *client, unsigned int num_regions,
411		    struct dm_io_region *where, int rw, struct dpages *dp,
412		    io_notify_fn fn, void *context)
413{
414	struct io *io;
415
416	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
417		WARN_ON(1);
418		fn(1, context);
419		return -EIO;
420	}
421
422	io = mempool_alloc(client->pool, GFP_NOIO);
423	io->error_bits = 0;
424	atomic_set(&io->count, 1); /* see dispatch_io() */
425	io->sleeper = NULL;
426	io->client = client;
427	io->callback = fn;
428	io->context = context;
429
430	io->vma_invalidate_address = dp->vma_invalidate_address;
431	io->vma_invalidate_size = dp->vma_invalidate_size;
432
433	dispatch_io(rw, num_regions, where, dp, io, 0);
434	return 0;
435}
436
437static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
438		   unsigned long size)
439{
440	/* Set up dpages based on memory type */
441
442	dp->vma_invalidate_address = NULL;
443	dp->vma_invalidate_size = 0;
444
445	switch (io_req->mem.type) {
446	case DM_IO_PAGE_LIST:
447		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
448		break;
449
450	case DM_IO_BVEC:
451		bvec_dp_init(dp, io_req->mem.ptr.bvec);
452		break;
453
454	case DM_IO_VMA:
455		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
456		if ((io_req->bi_rw & RW_MASK) == READ) {
457			dp->vma_invalidate_address = io_req->mem.ptr.vma;
458			dp->vma_invalidate_size = size;
459		}
460		vm_dp_init(dp, io_req->mem.ptr.vma);
461		break;
462
463	case DM_IO_KMEM:
464		km_dp_init(dp, io_req->mem.ptr.addr);
465		break;
466
467	default:
468		return -EINVAL;
469	}
470
471	return 0;
472}
473
474/*
475 * New collapsed (a)synchronous interface.
476 *
477 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
478 * the queue with blk_unplug() some time later or set REQ_SYNC in
479 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
480 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
481 */
482int dm_io(struct dm_io_request *io_req, unsigned num_regions,
483	  struct dm_io_region *where, unsigned long *sync_error_bits)
484{
485	int r;
486	struct dpages dp;
487
488	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
489	if (r)
490		return r;
491
492	if (!io_req->notify.fn)
493		return sync_io(io_req->client, num_regions, where,
494			       io_req->bi_rw, &dp, sync_error_bits);
495
496	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
497			&dp, io_req->notify.fn, io_req->notify.context);
498}
499EXPORT_SYMBOL(dm_io);
500
501int __init dm_io_init(void)
502{
503	_dm_io_cache = KMEM_CACHE(io, 0);
504	if (!_dm_io_cache)
505		return -ENOMEM;
506
507	return 0;
508}
509
510void dm_io_exit(void)
511{
512	kmem_cache_destroy(_dm_io_cache);
513	_dm_io_cache = NULL;
514}