/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t pool;
	struct bio_set bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

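/*
 * Lifecycle sketch (illustrative only, not code in this file): a
 * hypothetical target that issues io through this interface would
 * create one client up front and pair the calls in its ctr/dtr:
 *
 *	struct dm_io_client *c = dm_io_client_create();
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	... issue io via dm_io() using 'c' ...
 *	dm_io_client_destroy(c);
 */
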
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

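/*
 * Worked example, assuming a 64-bit build (DM_IO_MAX_REGIONS == 64,
 * so the low 6 bits of an aligned 'struct io' pointer are zero):
 *
 *	io         = 0x...f40		(64-byte aligned)
 *	region     = 5
 *	bi_private = 0x...f40 | 5 = 0x...f45
 *
 * Decoding masks the same 6 bits back out:
 *
 *	io     = 0x...f45 & ~63UL = 0x...f40
 *	region = 0x...f45 &  63   = 5
 */
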
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};

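/*
 * The contract is pull-based: the consumer calls get_page() to look
 * at the current page (which never advances the cursor), uses as much
 * of it as it wants, then calls next_page() to step.  A hypothetical
 * consumer draining a source into some use_page() helper of its own
 * would look like:
 *
 *	while (bytes_left) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		len = min(len, bytes_left);
 *		use_page(page, len, offset);
 *		bytes_left -= len;
 *		dp->next_page(dp);
 *	}
 */
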
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use the bvec iterator to retrieve pages, so it is ok
	 * to access the bvec table directly here.
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(const blk_opf_t opf, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;
	const enum req_op op = opf & REQ_OP_MASK;

	/*
	 * Reject unsupported discard and write zeroes requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
	    special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if opf holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		default:
			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
						(PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
				       &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}

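/*
 * Sizing sketch for the default case above, assuming 4 KiB pages and
 * 512-byte sectors: a 1 MiB region has remaining == 2048 sectors, so
 * dm_sector_div_up(2048, 8) asks for 256 bvecs and bio_max_segs()
 * caps the request at BIO_MAX_VECS.  Anything that does not fit in
 * one bio (bio_add_page() failing, or remaining still non-zero) just
 * triggers another trip around the do/while loop.
 */
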
static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		opf |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (opf & REQ_PREFLUSH))
			do_region(opf, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(opf)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, blk_opf_t opf,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(opf)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_opf, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where,
			io_req->bi_opf, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

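/*
 * Usage sketch (illustrative only): a synchronous read of 8 sectors
 * into a kernel buffer via a previously created client.  The names
 * 'client', 'dev' and 'data' belong to the hypothetical caller.
 *
 *	struct dm_io_region region = {
 *		.bdev = dev->bdev,
 *		.sector = 0,
 *		.count = 8,
 *	};
 *	struct dm_io_request req = {
 *		.bi_opf = REQ_OP_READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = data,
 *		.notify.fn = NULL,	// NULL notify.fn => synchronous
 *		.client = client,
 *	};
 *	unsigned long error_bits = 0;
 *	int r = dm_io(&req, 1, &region, &error_bits);
 *
 * Setting notify.fn to a completion callback instead makes the same
 * call asynchronous; the error bits then arrive as the callback's
 * first argument.
 */
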
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}