/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"
#include "dm-rq.h"

#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"

/*
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	blk_status_t error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
	unsigned completed;
};

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}

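/*
 * The blk-mq sizing knobs below are read through __dm_get_module_param(),
 * which clamps any user-supplied module parameter value to a sane range.
 */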
static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}

int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}

void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}

void dm_stop_queue(struct request_queue *q)
{
	blk_mq_quiesce_queue(q);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_mq_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}

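/*
 * The dm_rq_target_io is carried as the blk-mq per-request payload (PDU),
 * so blk_mq_rq_to_pdu() simply returns the memory immediately following
 * the struct request allocated for the original request.
 */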
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}

static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Alternatively, do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone, NULL);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}

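/*
 * Requeue handling: park the original request on the queue's requeue list
 * and kick that list, optionally after a delay, so blk-mq re-dispatches it.
 */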
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}

void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(md->queue, 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);

static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}

static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone, NULL);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}

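/*
 * Decide how to finish a completed clone: consult the target's optional
 * rq_end_io() hook, then complete, requeue, or leave the original request
 * to the target based on its return value.
 */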
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_DISCARD &&
		    !clone->q->limits.max_discard_sectors)
			disable_discard(tio->md);
		else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
			 !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMCRIT("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (likely(!blk_should_fake_timeout(rq->q)))
		blk_mq_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}

static enum rq_end_io_ret end_clone_request(struct request *clone,
					    blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
	return RQ_END_IO_NONE;
}

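/*
 * Called by blk_rq_prep_clone() for each bio it clones; record the mapping
 * back to the tio and route the clone bio's completion to end_clone_bio().
 */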
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}

/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone, &tio->info);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = blk_insert_cloned_request(clone);
		switch (ret) {
		case BLK_STS_OK:
			break;
		case BLK_STS_RESOURCE:
		case BLK_STS_DEV_RESOURCE:
			blk_rq_unprep_clone(clone);
			blk_mq_cleanup_rq(clone);
			tio->ti->type->release_clone_rq(clone, &tio->info);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		default:
			/* must complete clone in terms of original request */
			dm_complete_request(rq, ret);
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMCRIT("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}

/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count held by the device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}

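/*
 * .init_request is invoked once per request when the tag set is allocated,
 * so the tio's md (and optional per-io data pointer) is prepared up front.
 */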
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}

static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	/*
	 * blk-mq's unquiesce may come from outside events, such as an
	 * elevator switch or an nr_requests update, so a request may arrive
	 * during suspend; simply ask blk-mq to requeue it.
	 */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)))
		return BLK_STS_RESOURCE;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map;

		map = dm_get_live_table(md, &srcu_idx);
		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return BLK_STS_RESOURCE;
		}
		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};

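/*
 * Allocate and populate the blk-mq tag set for this mapped device, then
 * attach it to the device's existing request queue.
 */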
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (err)
		goto out_tag_set;
	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);
	md->tag_set = NULL;

	return err;
}

void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
		md->tag_set = NULL;
	}
}

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");