/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
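
/*
 * Typical lifecycle (a sketch, not taken from this file): a target
 * allocates a client in its constructor and releases it in its
 * destructor. 'ctx' below is a hypothetical per-target context.
 *
 *	ctx->io_client = dm_io_client_create();
 *	if (IS_ERR(ctx->io_client))
 *		return PTR_ERR(ctx->io_client);
 *	...
 *	dm_io_client_destroy(ctx->io_client);
 */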

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
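
/*
 * Worked example of the packing above (assuming BITS_PER_LONG == 64, so
 * 'struct io' is aligned to 64 bytes and its low 6 bits are zero):
 *
 *	bi_private = (unsigned long)io | region;   e.g. 0x...c0 | 5  -> 0x...c5
 *	io         = bi_private & ~63UL;                              -> 0x...c0
 *	region     = bi_private &  63UL;                              -> 5
 */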

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
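/*
 * io->count starts at 1 (the submitter's reference, see dispatch_io())
 * and do_region() takes one extra reference per bio it submits.  The io
 * therefore completes only after dispatch_io() has dropped the initial
 * reference and every bio has ended.
 */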
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * Retrieve the io and region before bio_put() frees the bio.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
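
/*
 * Consumers drive a dpages with a get/advance loop; this is the pattern
 * do_region() uses below:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	... add up to 'len' bytes of 'page', starting at 'offset', to a bio ...
 *	dp->next_page(dp);
 */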

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bio.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

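/*
 * Set up a dpages that walks the unprocessed part of a bio.  bi_iter
 * identifies the current bvec and bi_bvec_done the number of bytes of
 * that bvec already completed, so context_u lets bio_get_page() start
 * mid-bvec when the bio has been partially processed.
 */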
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}

/*
 * Functions for getting the pages from a VMA.
 */
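/*
 * A vmalloc()ed buffer is virtually contiguous but not physically
 * contiguous, so each page has to be looked up with vmalloc_to_page().
 * Cache maintenance for these buffers is handled elsewhere: dp_init()
 * flushes before the I/O and dec_count() invalidates after a read.
 */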
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
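/*
 * Issue the bios for a single region.  A region may need several bios:
 * the dpages is consumed page by page until either the bio is full or
 * the region's sector count is exhausted.  Discards and WRITE SAME are
 * special-cased because they carry at most a single payload page but
 * cover many sectors.
 */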
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

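/*
 * Dispatch the io to every region, rewinding the dpages for each one.
 * The caller's initial reference on io->count keeps the io alive until
 * every region has been submitted; it is dropped at the end so the
 * completion path cannot run before all bios are in flight.
 */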
static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

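/*
 * Translate the caller's memory descriptor (io_req->mem) into a dpages
 * and record whether a vmalloc range will need invalidating after a
 * read.  'size' is the byte length of the first region.
 */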
static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
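/*
 * Example (a sketch with hypothetical caller-side names, not code from
 * this file): synchronously read one page of kernel memory from the
 * start of a device previously opened by the caller.
 *
 *	struct dm_io_region where = {
 *		.bdev	= dev->bdev,
 *		.sector	= 0,
 *		.count	= PAGE_SIZE >> SECTOR_SHIFT,
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw		= READ,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= data,
 *		.notify.fn	= NULL,
 *		.client		= io_client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 *
 * A NULL notify.fn selects the synchronous path (sync_io()); 'dev',
 * 'data' and 'io_client' are the caller's own and error handling is
 * elided.
 */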
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}