1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
 * -  July 2000
8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
9 */
10
11/*
12 * This handles all read/write requests to block devices
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/backing-dev.h>
17#include <linux/bio.h>
18#include <linux/blkdev.h>
19#include <linux/highmem.h>
20#include <linux/mm.h>
21#include <linux/kernel_stat.h>
22#include <linux/string.h>
23#include <linux/init.h>
24#include <linux/completion.h>
25#include <linux/slab.h>
26#include <linux/swap.h>
27#include <linux/writeback.h>
28#include <linux/task_io_accounting_ops.h>
29#include <linux/fault-inject.h>
30#include <linux/list_sort.h>
31#include <linux/delay.h>
32#include <linux/ratelimit.h>
33
34#define CREATE_TRACE_POINTS
35#include <trace/events/block.h>
36
37#include "blk.h"
38#include "blk-cgroup.h"
39
40EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
41EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
42EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
43
44DEFINE_IDA(blk_queue_ida);
45
46/*
47 * For the allocated request tables
48 */
49static struct kmem_cache *request_cachep;
50
51/*
52 * For queue allocation
53 */
54struct kmem_cache *blk_requestq_cachep;
55
56/*
57 * Controlling structure to kblockd
58 */
59static struct workqueue_struct *kblockd_workqueue;
60
61static void drive_stat_acct(struct request *rq, int new_io)
62{
63 struct hd_struct *part;
64 int rw = rq_data_dir(rq);
65 int cpu;
66
67 if (!blk_do_io_stat(rq))
68 return;
69
70 cpu = part_stat_lock();
71
72 if (!new_io) {
73 part = rq->part;
74 part_stat_inc(cpu, part, merges[rw]);
75 } else {
76 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
77 if (!hd_struct_try_get(part)) {
78 /*
79 * The partition is already being removed,
80 * the request will be accounted on the disk only
81 *
82 * We take a reference on disk->part0 although that
83 * partition will never be deleted, so we can treat
84 * it as any other partition.
85 */
86 part = &rq->rq_disk->part0;
87 hd_struct_get(part);
88 }
89 part_round_stats(cpu, part);
90 part_inc_in_flight(part, rw);
91 rq->part = part;
92 }
93
94 part_stat_unlock();
95}
96
97void blk_queue_congestion_threshold(struct request_queue *q)
98{
99 int nr;
100
101 nr = q->nr_requests - (q->nr_requests / 8) + 1;
102 if (nr > q->nr_requests)
103 nr = q->nr_requests;
104 q->nr_congestion_on = nr;
105
106 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
107 if (nr < 1)
108 nr = 1;
109 q->nr_congestion_off = nr;
110}
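
/*
 * For example, with the default nr_requests of 128 the thresholds work out
 * to nr_congestion_on = 128 - 16 + 1 = 113 and
 * nr_congestion_off = 128 - 16 - 8 - 1 = 103, so the queue is flagged
 * congested shortly before it fills up and uncongested only after it has
 * drained a bit further, giving the flag some hysteresis.
 */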
111
112/**
113 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
114 * @bdev: device
115 *
116 * Locates the passed device's request queue and returns the address of its
117 * backing_dev_info
118 *
119 * Will return NULL if the request queue cannot be located.
120 */
121struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
122{
123 struct backing_dev_info *ret = NULL;
124 struct request_queue *q = bdev_get_queue(bdev);
125
126 if (q)
127 ret = &q->backing_dev_info;
128 return ret;
129}
130EXPORT_SYMBOL(blk_get_backing_dev_info);
131
132void blk_rq_init(struct request_queue *q, struct request *rq)
133{
134 memset(rq, 0, sizeof(*rq));
135
136 INIT_LIST_HEAD(&rq->queuelist);
137 INIT_LIST_HEAD(&rq->timeout_list);
138 rq->cpu = -1;
139 rq->q = q;
140 rq->__sector = (sector_t) -1;
141 INIT_HLIST_NODE(&rq->hash);
142 RB_CLEAR_NODE(&rq->rb_node);
143 rq->cmd = rq->__cmd;
144 rq->cmd_len = BLK_MAX_CDB;
145 rq->tag = -1;
146 rq->ref_count = 1;
147 rq->start_time = jiffies;
148 set_start_time_ns(rq);
149 rq->part = NULL;
150}
151EXPORT_SYMBOL(blk_rq_init);
152
153static void req_bio_endio(struct request *rq, struct bio *bio,
154 unsigned int nbytes, int error)
155{
156 if (error)
157 clear_bit(BIO_UPTODATE, &bio->bi_flags);
158 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
159 error = -EIO;
160
161 if (unlikely(nbytes > bio->bi_size)) {
162 printk(KERN_ERR "%s: want %u bytes done, %u left\n",
163 __func__, nbytes, bio->bi_size);
164 nbytes = bio->bi_size;
165 }
166
167 if (unlikely(rq->cmd_flags & REQ_QUIET))
168 set_bit(BIO_QUIET, &bio->bi_flags);
169
170 bio->bi_size -= nbytes;
171 bio->bi_sector += (nbytes >> 9);
172
173 if (bio_integrity(bio))
174 bio_integrity_advance(bio, nbytes);
175
176 /* don't actually finish bio if it's part of flush sequence */
177 if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
178 bio_endio(bio, error);
179}
180
181void blk_dump_rq_flags(struct request *rq, char *msg)
182{
183 int bit;
184
185 printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg,
186 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
187 rq->cmd_flags);
188
189 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
190 (unsigned long long)blk_rq_pos(rq),
191 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
192 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
193 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
194
195 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
196 printk(KERN_INFO " cdb: ");
197 for (bit = 0; bit < BLK_MAX_CDB; bit++)
198 printk("%02x ", rq->cmd[bit]);
199 printk("\n");
200 }
201}
202EXPORT_SYMBOL(blk_dump_rq_flags);
203
204static void blk_delay_work(struct work_struct *work)
205{
206 struct request_queue *q;
207
208 q = container_of(work, struct request_queue, delay_work.work);
209 spin_lock_irq(q->queue_lock);
210 __blk_run_queue(q);
211 spin_unlock_irq(q->queue_lock);
212}
213
214/**
215 * blk_delay_queue - restart queueing after defined interval
216 * @q: The &struct request_queue in question
217 * @msecs: Delay in msecs
218 *
219 * Description:
220 * Sometimes queueing needs to be postponed for a little while, to allow
221 * resources to come back. This function will make sure that queueing is
222 * restarted around the specified time.
223 */
224void blk_delay_queue(struct request_queue *q, unsigned long msecs)
225{
226 queue_delayed_work(kblockd_workqueue, &q->delay_work,
227 msecs_to_jiffies(msecs));
228}
229EXPORT_SYMBOL(blk_delay_queue);
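
/*
 * A minimal sketch of how a driver's request_fn might use this; "my_dev",
 * "my_resources_available" and "my_hw_submit" are illustrative placeholders,
 * not real APIs:
 *
 *        static void my_request_fn(struct request_queue *q)
 *        {
 *                struct my_dev *dev = q->queuedata;
 *                struct request *rq;
 *
 *                while ((rq = blk_fetch_request(q)) != NULL) {
 *                        if (!my_resources_available(dev)) {
 *                                blk_requeue_request(q, rq);
 *                                blk_delay_queue(q, 3);
 *                                return;
 *                        }
 *                        my_hw_submit(dev, rq);
 *                }
 *        }
 *
 * blk_delay_queue() arms q->delay_work, so the queue is run again roughly
 * 3 msecs later without the driver having to poll.
 */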
230
231/**
232 * blk_start_queue - restart a previously stopped queue
233 * @q: The &struct request_queue in question
234 *
235 * Description:
236 * blk_start_queue() will clear the stop flag on the queue, and call
237 * the request_fn for the queue if it was in a stopped state when
238 * entered. Also see blk_stop_queue(). Queue lock must be held.
239 **/
240void blk_start_queue(struct request_queue *q)
241{
242 WARN_ON(!irqs_disabled());
243
244 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
245 __blk_run_queue(q);
246}
247EXPORT_SYMBOL(blk_start_queue);
248
249/**
250 * blk_stop_queue - stop a queue
251 * @q: The &struct request_queue in question
252 *
253 * Description:
254 * The Linux block layer assumes that a block driver will consume all
255 * entries on the request queue when the request_fn strategy is called.
256 * Often this will not happen, because of hardware limitations (queue
257 * depth settings). If a device driver gets a 'queue full' response,
258 * or if it simply chooses not to queue more I/O at one point, it can
259 * call this function to prevent the request_fn from being called until
260 * the driver has signalled it's ready to go again. This happens by calling
261 * blk_start_queue() to restart queue operations. Queue lock must be held.
262 **/
263void blk_stop_queue(struct request_queue *q)
264{
265 __cancel_delayed_work(&q->delay_work);
266 queue_flag_set(QUEUE_FLAG_STOPPED, q);
267}
268EXPORT_SYMBOL(blk_stop_queue);
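
/*
 * A typical (sketched) pairing with blk_start_queue(): the request_fn stops
 * the queue when the hardware reports 'queue full' and the completion
 * interrupt restarts it.  The "my_hw_*" names are illustrative only:
 *
 *        (in the request_fn, queue lock already held)
 *        if (my_hw_queue_full(dev)) {
 *                blk_requeue_request(q, rq);
 *                blk_stop_queue(q);
 *                return;
 *        }
 *
 *        (in the completion interrupt handler)
 *        spin_lock_irqsave(q->queue_lock, flags);
 *        if (my_hw_has_room(dev))
 *                blk_start_queue(q);
 *        spin_unlock_irqrestore(q->queue_lock, flags);
 */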
269
270/**
271 * blk_sync_queue - cancel any pending callbacks on a queue
272 * @q: the queue
273 *
274 * Description:
275 * The block layer may perform asynchronous callback activity
276 * on a queue, such as calling the unplug function after a timeout.
277 * A block device may call blk_sync_queue to ensure that any
278 * such activity is cancelled, thus allowing it to release resources
279 * that the callbacks might use. The caller must already have made sure
280 * that its ->make_request_fn will not re-add plugging prior to calling
281 * this function.
282 *
283 * This function does not cancel any asynchronous activity arising
284 * out of elevator or throttling code. That would require elevaotor_exit()
285 * and blkcg_exit_queue() to be called with queue lock initialized.
286 *
287 */
288void blk_sync_queue(struct request_queue *q)
289{
290 del_timer_sync(&q->timeout);
291 cancel_delayed_work_sync(&q->delay_work);
292}
293EXPORT_SYMBOL(blk_sync_queue);
294
295/**
296 * __blk_run_queue - run a single device queue
297 * @q: The queue to run
298 *
299 * Description:
300 * See @blk_run_queue. This variant must be called with the queue lock
301 * held and interrupts disabled.
302 */
303void __blk_run_queue(struct request_queue *q)
304{
305 if (unlikely(blk_queue_stopped(q)))
306 return;
307
308 q->request_fn(q);
309}
310EXPORT_SYMBOL(__blk_run_queue);
311
312/**
313 * blk_run_queue_async - run a single device queue in workqueue context
314 * @q: The queue to run
315 *
316 * Description:
 * Tells kblockd to perform the equivalent of @blk_run_queue on our
 * behalf.
319 */
320void blk_run_queue_async(struct request_queue *q)
321{
322 if (likely(!blk_queue_stopped(q))) {
323 __cancel_delayed_work(&q->delay_work);
324 queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
325 }
326}
327EXPORT_SYMBOL(blk_run_queue_async);
328
329/**
330 * blk_run_queue - run a single device queue
331 * @q: The queue to run
332 *
333 * Description:
334 * Invoke request handling on this queue, if it has pending work to do.
335 * May be used to restart queueing when a request has completed.
336 */
337void blk_run_queue(struct request_queue *q)
338{
339 unsigned long flags;
340
341 spin_lock_irqsave(q->queue_lock, flags);
342 __blk_run_queue(q);
343 spin_unlock_irqrestore(q->queue_lock, flags);
344}
345EXPORT_SYMBOL(blk_run_queue);
346
347void blk_put_queue(struct request_queue *q)
348{
349 kobject_put(&q->kobj);
350}
351EXPORT_SYMBOL(blk_put_queue);
352
353/**
354 * blk_drain_queue - drain requests from request_queue
355 * @q: queue to drain
356 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
357 *
358 * Drain requests from @q. If @drain_all is set, all requests are drained.
359 * If not, only ELVPRIV requests are drained. The caller is responsible
360 * for ensuring that no new requests which need to be drained are queued.
361 */
362void blk_drain_queue(struct request_queue *q, bool drain_all)
363{
364 int i;
365
366 while (true) {
367 bool drain = false;
368
369 spin_lock_irq(q->queue_lock);
370
371 /*
372 * The caller might be trying to drain @q before its
373 * elevator is initialized.
374 */
375 if (q->elevator)
376 elv_drain_elevator(q);
377
378 blkcg_drain_queue(q);
379
380 /*
381 * This function might be called on a queue which failed
 * driver init after queue creation or is not yet fully
 * active. Some drivers (e.g. fd and loop) get unhappy
384 * in such cases. Kick queue iff dispatch queue has
385 * something on it and @q has request_fn set.
386 */
387 if (!list_empty(&q->queue_head) && q->request_fn)
388 __blk_run_queue(q);
389
390 drain |= q->rq.elvpriv;
391
392 /*
393 * Unfortunately, requests are queued at and tracked from
394 * multiple places and there's no single counter which can
395 * be drained. Check all the queues and counters.
396 */
397 if (drain_all) {
398 drain |= !list_empty(&q->queue_head);
399 for (i = 0; i < 2; i++) {
400 drain |= q->rq.count[i];
401 drain |= q->in_flight[i];
402 drain |= !list_empty(&q->flush_queue[i]);
403 }
404 }
405
406 spin_unlock_irq(q->queue_lock);
407
408 if (!drain)
409 break;
410 msleep(10);
411 }
412
413 /*
414 * With queue marked dead, any woken up waiter will fail the
415 * allocation path, so the wakeup chaining is lost and we're
416 * left with hung waiters. We need to wake up those waiters.
417 */
418 if (q->request_fn) {
419 spin_lock_irq(q->queue_lock);
420 for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
421 wake_up_all(&q->rq.wait[i]);
422 spin_unlock_irq(q->queue_lock);
423 }
424}
425
426/**
427 * blk_queue_bypass_start - enter queue bypass mode
428 * @q: queue of interest
429 *
430 * In bypass mode, only the dispatch FIFO queue of @q is used. This
431 * function makes @q enter bypass mode and drains all requests which were
432 * throttled or issued before. On return, it's guaranteed that no request
 * is being throttled or has ELVPRIV set, and blk_queue_bypass() is %true
434 * inside queue or RCU read lock.
435 */
436void blk_queue_bypass_start(struct request_queue *q)
437{
438 bool drain;
439
440 spin_lock_irq(q->queue_lock);
441 drain = !q->bypass_depth++;
442 queue_flag_set(QUEUE_FLAG_BYPASS, q);
443 spin_unlock_irq(q->queue_lock);
444
445 if (drain) {
446 blk_drain_queue(q, false);
447 /* ensure blk_queue_bypass() is %true inside RCU read lock */
448 synchronize_rcu();
449 }
450}
451EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
452
453/**
454 * blk_queue_bypass_end - leave queue bypass mode
455 * @q: queue of interest
456 *
457 * Leave bypass mode and restore the normal queueing behavior.
458 */
459void blk_queue_bypass_end(struct request_queue *q)
460{
461 spin_lock_irq(q->queue_lock);
462 if (!--q->bypass_depth)
463 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
464 WARN_ON_ONCE(q->bypass_depth < 0);
465 spin_unlock_irq(q->queue_lock);
466}
467EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
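
/*
 * A caller that needs all ELVPRIV requests drained and throttling quiesced
 * while it changes queue-wide state brackets the change with the two
 * functions above, roughly:
 *
 *        blk_queue_bypass_start(q);
 *        ... modify elevator / policy state ...
 *        blk_queue_bypass_end(q);
 */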
468
469/**
470 * blk_cleanup_queue - shutdown a request queue
471 * @q: request queue to shutdown
472 *
473 * Mark @q DEAD, drain all pending requests, destroy and put it. All
474 * future requests will be failed immediately with -ENODEV.
475 */
476void blk_cleanup_queue(struct request_queue *q)
477{
478 spinlock_t *lock = q->queue_lock;
479
480 /* mark @q DEAD, no new request or merges will be allowed afterwards */
481 mutex_lock(&q->sysfs_lock);
482 queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
483 spin_lock_irq(lock);
484
485 /*
486 * Dead queue is permanently in bypass mode till released. Note
487 * that, unlike blk_queue_bypass_start(), we aren't performing
488 * synchronize_rcu() after entering bypass mode to avoid the delay
489 * as some drivers create and destroy a lot of queues while
490 * probing. This is still safe because blk_release_queue() will be
491 * called only after the queue refcnt drops to zero and nothing,
492 * RCU or not, would be traversing the queue by then.
493 */
494 q->bypass_depth++;
495 queue_flag_set(QUEUE_FLAG_BYPASS, q);
496
497 queue_flag_set(QUEUE_FLAG_NOMERGES, q);
498 queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
499 queue_flag_set(QUEUE_FLAG_DEAD, q);
500 spin_unlock_irq(lock);
501 mutex_unlock(&q->sysfs_lock);
502
503 /* drain all requests queued before DEAD marking */
504 blk_drain_queue(q, true);
505
506 /* @q won't process any more request, flush async actions */
507 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
508 blk_sync_queue(q);
509
510 spin_lock_irq(lock);
511 if (q->queue_lock != &q->__queue_lock)
512 q->queue_lock = &q->__queue_lock;
513 spin_unlock_irq(lock);
514
515 /* @q is and will stay empty, shutdown and put */
516 blk_put_queue(q);
517}
518EXPORT_SYMBOL(blk_cleanup_queue);
519
520static int blk_init_free_list(struct request_queue *q)
521{
522 struct request_list *rl = &q->rq;
523
524 if (unlikely(rl->rq_pool))
525 return 0;
526
527 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
528 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
529 rl->elvpriv = 0;
530 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
531 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
532
533 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
534 mempool_free_slab, request_cachep, q->node);
535
536 if (!rl->rq_pool)
537 return -ENOMEM;
538
539 return 0;
540}
541
542struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
543{
544 return blk_alloc_queue_node(gfp_mask, -1);
545}
546EXPORT_SYMBOL(blk_alloc_queue);
547
548struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
549{
550 struct request_queue *q;
551 int err;
552
553 q = kmem_cache_alloc_node(blk_requestq_cachep,
554 gfp_mask | __GFP_ZERO, node_id);
555 if (!q)
556 return NULL;
557
558 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
559 if (q->id < 0)
560 goto fail_q;
561
562 q->backing_dev_info.ra_pages =
563 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
564 q->backing_dev_info.state = 0;
565 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
566 q->backing_dev_info.name = "block";
567 q->node = node_id;
568
569 err = bdi_init(&q->backing_dev_info);
570 if (err)
571 goto fail_id;
572
573 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
574 laptop_mode_timer_fn, (unsigned long) q);
575 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
576 INIT_LIST_HEAD(&q->queue_head);
577 INIT_LIST_HEAD(&q->timeout_list);
578 INIT_LIST_HEAD(&q->icq_list);
579#ifdef CONFIG_BLK_CGROUP
580 INIT_LIST_HEAD(&q->blkg_list);
581#endif
582 INIT_LIST_HEAD(&q->flush_queue[0]);
583 INIT_LIST_HEAD(&q->flush_queue[1]);
584 INIT_LIST_HEAD(&q->flush_data_in_flight);
585 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
586
587 kobject_init(&q->kobj, &blk_queue_ktype);
588
589 mutex_init(&q->sysfs_lock);
590 spin_lock_init(&q->__queue_lock);
591
592 /*
593 * By default initialize queue_lock to internal lock and driver can
594 * override it later if need be.
595 */
596 q->queue_lock = &q->__queue_lock;
597
598 /*
599 * A queue starts its life with bypass turned on to avoid
600 * unnecessary bypass on/off overhead and nasty surprises during
601 * init. The initial bypass will be finished at the end of
602 * blk_init_allocated_queue().
603 */
604 q->bypass_depth = 1;
605 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
606
607 if (blkcg_init_queue(q))
608 goto fail_id;
609
610 return q;
611
612fail_id:
613 ida_simple_remove(&blk_queue_ida, q->id);
614fail_q:
615 kmem_cache_free(blk_requestq_cachep, q);
616 return NULL;
617}
618EXPORT_SYMBOL(blk_alloc_queue_node);
619
620/**
621 * blk_init_queue - prepare a request queue for use with a block device
622 * @rfn: The function to be called to process requests that have been
623 * placed on the queue.
624 * @lock: Request queue spin lock
625 *
626 * Description:
627 * If a block device wishes to use the standard request handling procedures,
628 * which sorts requests and coalesces adjacent requests, then it must
629 * call blk_init_queue(). The function @rfn will be called when there
630 * are requests on the queue that need to be processed. If the device
631 * supports plugging, then @rfn may not be called immediately when requests
632 * are available on the queue, but may be called at some time later instead.
633 * Plugged queues are generally unplugged when a buffer belonging to one
634 * of the requests on the queue is needed, or due to memory pressure.
635 *
636 * @rfn is not required, or even expected, to remove all requests off the
637 * queue, but only as many as it can handle at a time. If it does leave
638 * requests on the queue, it is responsible for arranging that the requests
639 * get dealt with eventually.
640 *
641 * The queue spin lock must be held while manipulating the requests on the
642 * request queue; this lock will be taken also from interrupt context, so irq
643 * disabling is needed for it.
644 *
645 * Function returns a pointer to the initialized request queue, or %NULL if
646 * it didn't succeed.
647 *
648 * Note:
649 * blk_init_queue() must be paired with a blk_cleanup_queue() call
650 * when the block device is deactivated (such as at module unload).
651 **/
652
653struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
654{
655 return blk_init_queue_node(rfn, lock, -1);
656}
657EXPORT_SYMBOL(blk_init_queue);
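
/*
 * A rough sketch of the usual driver life cycle around blk_init_queue();
 * "my_request_fn", "my_lock" and "my_disk" stand in for the driver's own
 * objects:
 *
 *        static DEFINE_SPINLOCK(my_lock);
 *
 *        q = blk_init_queue(my_request_fn, &my_lock);
 *        if (!q)
 *                return -ENOMEM;
 *        blk_queue_logical_block_size(q, 512);
 *        my_disk->queue = q;
 *        add_disk(my_disk);
 *
 *        and on teardown (e.g. module unload):
 *
 *        del_gendisk(my_disk);
 *        blk_cleanup_queue(q);
 */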
658
659struct request_queue *
660blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
661{
662 struct request_queue *uninit_q, *q;
663
664 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
665 if (!uninit_q)
666 return NULL;
667
668 q = blk_init_allocated_queue(uninit_q, rfn, lock);
669 if (!q)
670 blk_cleanup_queue(uninit_q);
671
672 return q;
673}
674EXPORT_SYMBOL(blk_init_queue_node);
675
676struct request_queue *
677blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
678 spinlock_t *lock)
679{
680 if (!q)
681 return NULL;
682
683 if (blk_init_free_list(q))
684 return NULL;
685
686 q->request_fn = rfn;
687 q->prep_rq_fn = NULL;
688 q->unprep_rq_fn = NULL;
689 q->queue_flags = QUEUE_FLAG_DEFAULT;
690
691 /* Override internal queue lock with supplied lock pointer */
692 if (lock)
693 q->queue_lock = lock;
694
695 /*
696 * This also sets hw/phys segments, boundary and size
697 */
698 blk_queue_make_request(q, blk_queue_bio);
699
700 q->sg_reserved_size = INT_MAX;
701
702 /* init elevator */
703 if (elevator_init(q, NULL))
704 return NULL;
705
706 blk_queue_congestion_threshold(q);
707
708 /* all done, end the initial bypass */
709 blk_queue_bypass_end(q);
710 return q;
711}
712EXPORT_SYMBOL(blk_init_allocated_queue);
713
714bool blk_get_queue(struct request_queue *q)
715{
716 if (likely(!blk_queue_dead(q))) {
717 __blk_get_queue(q);
718 return true;
719 }
720
721 return false;
722}
723EXPORT_SYMBOL(blk_get_queue);
724
725static inline void blk_free_request(struct request_queue *q, struct request *rq)
726{
727 if (rq->cmd_flags & REQ_ELVPRIV) {
728 elv_put_request(q, rq);
729 if (rq->elv.icq)
730 put_io_context(rq->elv.icq->ioc);
731 }
732
733 mempool_free(rq, q->rq.rq_pool);
734}
735
736/*
737 * ioc_batching returns true if the ioc is a valid batching request and
738 * should be given priority access to a request.
739 */
740static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
741{
742 if (!ioc)
743 return 0;
744
745 /*
746 * Make sure the process is able to allocate at least 1 request
747 * even if the batch times out, otherwise we could theoretically
748 * lose wakeups.
749 */
750 return ioc->nr_batch_requests == q->nr_batching ||
751 (ioc->nr_batch_requests > 0
752 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
753}
754
755/*
756 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
757 * will cause the process to be a "batcher" on all queues in the system. This
758 * is the behaviour we want though - once it gets a wakeup it should be given
759 * a nice run.
760 */
761static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
762{
763 if (!ioc || ioc_batching(q, ioc))
764 return;
765
766 ioc->nr_batch_requests = q->nr_batching;
767 ioc->last_waited = jiffies;
768}
769
770static void __freed_request(struct request_queue *q, int sync)
771{
772 struct request_list *rl = &q->rq;
773
774 if (rl->count[sync] < queue_congestion_off_threshold(q))
775 blk_clear_queue_congested(q, sync);
776
777 if (rl->count[sync] + 1 <= q->nr_requests) {
778 if (waitqueue_active(&rl->wait[sync]))
779 wake_up(&rl->wait[sync]);
780
781 blk_clear_queue_full(q, sync);
782 }
783}
784
785/*
786 * A request has just been released. Account for it, update the full and
787 * congestion status, wake up any waiters. Called under q->queue_lock.
788 */
789static void freed_request(struct request_queue *q, unsigned int flags)
790{
791 struct request_list *rl = &q->rq;
792 int sync = rw_is_sync(flags);
793
794 rl->count[sync]--;
795 if (flags & REQ_ELVPRIV)
796 rl->elvpriv--;
797
798 __freed_request(q, sync);
799
800 if (unlikely(rl->starved[sync ^ 1]))
801 __freed_request(q, sync ^ 1);
802}
803
804/*
805 * Determine if elevator data should be initialized when allocating the
806 * request associated with @bio.
807 */
808static bool blk_rq_should_init_elevator(struct bio *bio)
809{
810 if (!bio)
811 return true;
812
813 /*
814 * Flush requests do not use the elevator so skip initialization.
815 * This allows a request to share the flush and elevator data.
816 */
817 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA))
818 return false;
819
820 return true;
821}
822
823/**
824 * rq_ioc - determine io_context for request allocation
825 * @bio: request being allocated is for this bio (can be %NULL)
826 *
827 * Determine io_context to use for request allocation for @bio. May return
828 * %NULL if %current->io_context doesn't exist.
829 */
830static struct io_context *rq_ioc(struct bio *bio)
831{
832#ifdef CONFIG_BLK_CGROUP
833 if (bio && bio->bi_ioc)
834 return bio->bi_ioc;
835#endif
836 return current->io_context;
837}
838
839/**
840 * get_request - get a free request
841 * @q: request_queue to allocate request from
842 * @rw_flags: RW and SYNC flags
843 * @bio: bio to allocate request for (can be %NULL)
844 * @gfp_mask: allocation mask
845 *
846 * Get a free request from @q. This function may fail under memory
847 * pressure or if @q is dead.
848 *
 * Must be called with @q->queue_lock held and,
850 * Returns %NULL on failure, with @q->queue_lock held.
851 * Returns !%NULL on success, with @q->queue_lock *not held*.
852 */
853static struct request *get_request(struct request_queue *q, int rw_flags,
854 struct bio *bio, gfp_t gfp_mask)
855{
856 struct request *rq;
857 struct request_list *rl = &q->rq;
858 struct elevator_type *et;
859 struct io_context *ioc;
860 struct io_cq *icq = NULL;
861 const bool is_sync = rw_is_sync(rw_flags) != 0;
862 bool retried = false;
863 int may_queue;
864retry:
865 et = q->elevator->type;
866 ioc = rq_ioc(bio);
867
868 if (unlikely(blk_queue_dead(q)))
869 return NULL;
870
871 may_queue = elv_may_queue(q, rw_flags);
872 if (may_queue == ELV_MQUEUE_NO)
873 goto rq_starved;
874
875 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
876 if (rl->count[is_sync]+1 >= q->nr_requests) {
877 /*
878 * We want ioc to record batching state. If it's
879 * not already there, creating a new one requires
880 * dropping queue_lock, which in turn requires
881 * retesting conditions to avoid queue hang.
882 */
883 if (!ioc && !retried) {
884 spin_unlock_irq(q->queue_lock);
885 create_io_context(gfp_mask, q->node);
886 spin_lock_irq(q->queue_lock);
887 retried = true;
888 goto retry;
889 }
890
891 /*
892 * The queue will fill after this allocation, so set
893 * it as full, and mark this process as "batching".
894 * This process will be allowed to complete a batch of
895 * requests, others will be blocked.
896 */
897 if (!blk_queue_full(q, is_sync)) {
898 ioc_set_batching(q, ioc);
899 blk_set_queue_full(q, is_sync);
900 } else {
901 if (may_queue != ELV_MQUEUE_MUST
902 && !ioc_batching(q, ioc)) {
903 /*
904 * The queue is full and the allocating
905 * process is not a "batcher", and not
906 * exempted by the IO scheduler
907 */
908 return NULL;
909 }
910 }
911 }
912 blk_set_queue_congested(q, is_sync);
913 }
914
915 /*
916 * Only allow batching queuers to allocate up to 50% over the defined
917 * limit of requests, otherwise we could have thousands of requests
918 * allocated with any setting of ->nr_requests
919 */
920 if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
921 return NULL;
922
923 rl->count[is_sync]++;
924 rl->starved[is_sync] = 0;
925
926 /*
927 * Decide whether the new request will be managed by elevator. If
928 * so, mark @rw_flags and increment elvpriv. Non-zero elvpriv will
929 * prevent the current elevator from being destroyed until the new
930 * request is freed. This guarantees icq's won't be destroyed and
931 * makes creating new ones safe.
932 *
933 * Also, lookup icq while holding queue_lock. If it doesn't exist,
934 * it will be created after releasing queue_lock.
935 */
936 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
937 rw_flags |= REQ_ELVPRIV;
938 rl->elvpriv++;
939 if (et->icq_cache && ioc)
940 icq = ioc_lookup_icq(ioc, q);
941 }
942
943 if (blk_queue_io_stat(q))
944 rw_flags |= REQ_IO_STAT;
945 spin_unlock_irq(q->queue_lock);
946
947 /* allocate and init request */
948 rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
949 if (!rq)
950 goto fail_alloc;
951
952 blk_rq_init(q, rq);
953 rq->cmd_flags = rw_flags | REQ_ALLOCED;
954
955 /* init elvpriv */
956 if (rw_flags & REQ_ELVPRIV) {
957 if (unlikely(et->icq_cache && !icq)) {
958 create_io_context(gfp_mask, q->node);
959 ioc = rq_ioc(bio);
960 if (!ioc)
961 goto fail_elvpriv;
962
963 icq = ioc_create_icq(ioc, q, gfp_mask);
964 if (!icq)
965 goto fail_elvpriv;
966 }
967
968 rq->elv.icq = icq;
969 if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
970 goto fail_elvpriv;
971
972 /* @rq->elv.icq holds io_context until @rq is freed */
973 if (icq)
974 get_io_context(icq->ioc);
975 }
976out:
977 /*
978 * ioc may be NULL here, and ioc_batching will be false. That's
979 * OK, if the queue is under the request limit then requests need
980 * not count toward the nr_batch_requests limit. There will always
981 * be some limit enforced by BLK_BATCH_TIME.
982 */
983 if (ioc_batching(q, ioc))
984 ioc->nr_batch_requests--;
985
986 trace_block_getrq(q, bio, rw_flags & 1);
987 return rq;
988
989fail_elvpriv:
990 /*
991 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
992 * and may fail indefinitely under memory pressure and thus
993 * shouldn't stall IO. Treat this request as !elvpriv. This will
 * disturb iosched and blkcg but weird is better than dead.
995 */
996 printk_ratelimited(KERN_WARNING "%s: request aux data allocation failed, iosched may be disturbed\n",
997 dev_name(q->backing_dev_info.dev));
998
999 rq->cmd_flags &= ~REQ_ELVPRIV;
1000 rq->elv.icq = NULL;
1001
1002 spin_lock_irq(q->queue_lock);
1003 rl->elvpriv--;
1004 spin_unlock_irq(q->queue_lock);
1005 goto out;
1006
1007fail_alloc:
1008 /*
1009 * Allocation failed presumably due to memory. Undo anything we
1010 * might have messed up.
1011 *
1012 * Allocating task should really be put onto the front of the wait
1013 * queue, but this is pretty rare.
1014 */
1015 spin_lock_irq(q->queue_lock);
1016 freed_request(q, rw_flags);
1017
1018 /*
1019 * in the very unlikely event that allocation failed and no
 * requests for this direction were pending, mark us starved so that
1021 * freeing of a request in the other direction will notice
1022 * us. another possible fix would be to split the rq mempool into
1023 * READ and WRITE
1024 */
1025rq_starved:
1026 if (unlikely(rl->count[is_sync] == 0))
1027 rl->starved[is_sync] = 1;
1028 return NULL;
1029}
1030
1031/**
1032 * get_request_wait - get a free request with retry
1033 * @q: request_queue to allocate request from
1034 * @rw_flags: RW and SYNC flags
1035 * @bio: bio to allocate request for (can be %NULL)
1036 *
1037 * Get a free request from @q. This function keeps retrying under memory
1038 * pressure and fails iff @q is dead.
1039 *
 * Must be called with @q->queue_lock held and,
1041 * Returns %NULL on failure, with @q->queue_lock held.
1042 * Returns !%NULL on success, with @q->queue_lock *not held*.
1043 */
1044static struct request *get_request_wait(struct request_queue *q, int rw_flags,
1045 struct bio *bio)
1046{
1047 const bool is_sync = rw_is_sync(rw_flags) != 0;
1048 struct request *rq;
1049
1050 rq = get_request(q, rw_flags, bio, GFP_NOIO);
1051 while (!rq) {
1052 DEFINE_WAIT(wait);
1053 struct request_list *rl = &q->rq;
1054
1055 if (unlikely(blk_queue_dead(q)))
1056 return NULL;
1057
1058 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1059 TASK_UNINTERRUPTIBLE);
1060
1061 trace_block_sleeprq(q, bio, rw_flags & 1);
1062
1063 spin_unlock_irq(q->queue_lock);
1064 io_schedule();
1065
1066 /*
1067 * After sleeping, we become a "batching" process and
1068 * will be able to allocate at least one request, and
1069 * up to a big batch of them for a small period time.
1070 * See ioc_batching, ioc_set_batching
1071 */
1072 create_io_context(GFP_NOIO, q->node);
1073 ioc_set_batching(q, current->io_context);
1074
1075 spin_lock_irq(q->queue_lock);
1076 finish_wait(&rl->wait[is_sync], &wait);
1077
1078 rq = get_request(q, rw_flags, bio, GFP_NOIO);
1079 };
1080
1081 return rq;
1082}
1083
1084struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1085{
1086 struct request *rq;
1087
1088 BUG_ON(rw != READ && rw != WRITE);
1089
1090 spin_lock_irq(q->queue_lock);
1091 if (gfp_mask & __GFP_WAIT)
1092 rq = get_request_wait(q, rw, NULL);
1093 else
1094 rq = get_request(q, rw, NULL, gfp_mask);
1095 if (!rq)
1096 spin_unlock_irq(q->queue_lock);
1097 /* q->queue_lock is unlocked at this point */
1098
1099 return rq;
1100}
1101EXPORT_SYMBOL(blk_get_request);
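
/*
 * Sketch of a typical user: allocate a request, fill it in as a BLOCK_PC
 * command, execute it synchronously and release it.  The actual cdb
 * contents depend entirely on the caller:
 *
 *        rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *        if (!rq)
 *                return -ENODEV;
 *        rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *        rq->cmd_len = 6;
 *        rq->cmd[0] = ...;
 *        rq->timeout = 60 * HZ;
 *        blk_execute_rq(q, NULL, rq, 0);
 *        blk_put_request(rq);
 */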
1102
1103/**
1104 * blk_make_request - given a bio, allocate a corresponding struct request.
1105 * @q: target request queue
1106 * @bio: The bio describing the memory mappings that will be submitted for IO.
1107 * It may be a chained-bio properly constructed by block/bio layer.
1108 * @gfp_mask: gfp flags to be used for memory allocation
1109 *
1110 * blk_make_request is the parallel of generic_make_request for BLOCK_PC
 * type commands, where the struct request needs to be further initialized by
1112 * the caller. It is passed a &struct bio, which describes the memory info of
1113 * the I/O transfer.
1114 *
1115 * The caller of blk_make_request must make sure that bi_io_vec
 * are set to describe the memory buffers, and that bio_data_dir() returns
 * the needed direction of the request (and that all bios in the passed
 * bio chain are set accordingly).
1119 *
 * If called under non-sleepable conditions, the mapped bio buffers must not
 * need bouncing; allocate them with the appropriate masked or flagged
 * allocator suitable for the target device, otherwise the call to
 * blk_queue_bounce will BUG.
1124 *
1125 * WARNING: When allocating/cloning a bio-chain, careful consideration should be
1126 * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for
1127 * anything but the first bio in the chain. Otherwise you risk waiting for IO
1128 * completion of a bio that hasn't been submitted yet, thus resulting in a
1129 * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead
1130 * of bio_alloc(), as that avoids the mempool deadlock.
1131 * If possible a big IO should be split into smaller parts when allocation
1132 * fails. Partial allocation should not be an error, or you risk a live-lock.
1133 */
1134struct request *blk_make_request(struct request_queue *q, struct bio *bio,
1135 gfp_t gfp_mask)
1136{
1137 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask);
1138
1139 if (unlikely(!rq))
1140 return ERR_PTR(-ENOMEM);
1141
1142 for_each_bio(bio) {
1143 struct bio *bounce_bio = bio;
1144 int ret;
1145
1146 blk_queue_bounce(q, &bounce_bio);
1147 ret = blk_rq_append_bio(q, rq, bounce_bio);
1148 if (unlikely(ret)) {
1149 blk_put_request(rq);
1150 return ERR_PTR(ret);
1151 }
1152 }
1153
1154 return rq;
1155}
1156EXPORT_SYMBOL(blk_make_request);
1157
1158/**
1159 * blk_requeue_request - put a request back on queue
1160 * @q: request queue where request should be inserted
1161 * @rq: request to be inserted
1162 *
1163 * Description:
1164 * Drivers often keep queueing requests until the hardware cannot accept
 * more. When that happens, we need to put the request back
1166 * on the queue. Must be called with queue lock held.
1167 */
1168void blk_requeue_request(struct request_queue *q, struct request *rq)
1169{
1170 blk_delete_timer(rq);
1171 blk_clear_rq_complete(rq);
1172 trace_block_rq_requeue(q, rq);
1173
1174 if (blk_rq_tagged(rq))
1175 blk_queue_end_tag(q, rq);
1176
1177 BUG_ON(blk_queued_rq(rq));
1178
1179 elv_requeue_request(q, rq);
1180}
1181EXPORT_SYMBOL(blk_requeue_request);
1182
1183static void add_acct_request(struct request_queue *q, struct request *rq,
1184 int where)
1185{
1186 drive_stat_acct(rq, 1);
1187 __elv_add_request(q, rq, where);
1188}
1189
1190static void part_round_stats_single(int cpu, struct hd_struct *part,
1191 unsigned long now)
1192{
1193 if (now == part->stamp)
1194 return;
1195
1196 if (part_in_flight(part)) {
1197 __part_stat_add(cpu, part, time_in_queue,
1198 part_in_flight(part) * (now - part->stamp));
1199 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1200 }
1201 part->stamp = now;
1202}
1203
1204/**
1205 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1206 * @cpu: cpu number for stats access
1207 * @part: target partition
1208 *
1209 * The average IO queue length and utilisation statistics are maintained
1210 * by observing the current state of the queue length and the amount of
1211 * time it has been in this state for.
1212 *
1213 * Normally, that accounting is done on IO completion, but that can result
1214 * in more than a second's worth of IO being accounted for within any one
1215 * second, leading to >100% utilisation. To deal with that, we call this
1216 * function to do a round-off before returning the results when reading
1217 * /proc/diskstats. This accounts immediately for all queue usage up to
1218 * the current jiffies and restarts the counters again.
1219 */
1220void part_round_stats(int cpu, struct hd_struct *part)
1221{
1222 unsigned long now = jiffies;
1223
1224 if (part->partno)
1225 part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1226 part_round_stats_single(cpu, part, now);
1227}
1228EXPORT_SYMBOL_GPL(part_round_stats);
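
/*
 * Worked example of the accounting above: if two requests have been in
 * flight since part->stamp and 5 jiffies have passed, part_round_stats()
 * adds 2 * 5 = 10 to time_in_queue and 5 to io_ticks, then resets the
 * stamp so the next interval is measured from now.
 */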
1229
1230/*
1231 * queue lock must be held
1232 */
1233void __blk_put_request(struct request_queue *q, struct request *req)
1234{
1235 if (unlikely(!q))
1236 return;
1237 if (unlikely(--req->ref_count))
1238 return;
1239
1240 elv_completed_request(q, req);
1241
1242 /* this is a bio leak */
1243 WARN_ON(req->bio != NULL);
1244
1245 /*
1246 * Request may not have originated from ll_rw_blk. if not,
1247 * it didn't come out of our reserved rq pools
1248 */
1249 if (req->cmd_flags & REQ_ALLOCED) {
1250 unsigned int flags = req->cmd_flags;
1251
1252 BUG_ON(!list_empty(&req->queuelist));
1253 BUG_ON(!hlist_unhashed(&req->hash));
1254
1255 blk_free_request(q, req);
1256 freed_request(q, flags);
1257 }
1258}
1259EXPORT_SYMBOL_GPL(__blk_put_request);
1260
1261void blk_put_request(struct request *req)
1262{
1263 unsigned long flags;
1264 struct request_queue *q = req->q;
1265
1266 spin_lock_irqsave(q->queue_lock, flags);
1267 __blk_put_request(q, req);
1268 spin_unlock_irqrestore(q->queue_lock, flags);
1269}
1270EXPORT_SYMBOL(blk_put_request);
1271
1272/**
1273 * blk_add_request_payload - add a payload to a request
1274 * @rq: request to update
1275 * @page: page backing the payload
1276 * @len: length of the payload.
1277 *
 * This allows a block driver to add a payload to an already submitted
 * request. The driver needs to take care of freeing the payload
 * itself.
1281 *
1282 * Note that this is a quite horrible hack and nothing but handling of
1283 * discard requests should ever use it.
1284 */
1285void blk_add_request_payload(struct request *rq, struct page *page,
1286 unsigned int len)
1287{
1288 struct bio *bio = rq->bio;
1289
1290 bio->bi_io_vec->bv_page = page;
1291 bio->bi_io_vec->bv_offset = 0;
1292 bio->bi_io_vec->bv_len = len;
1293
1294 bio->bi_size = len;
1295 bio->bi_vcnt = 1;
1296 bio->bi_phys_segments = 1;
1297
1298 rq->__data_len = rq->resid_len = len;
1299 rq->nr_phys_segments = 1;
1300 rq->buffer = bio_data(bio);
1301}
1302EXPORT_SYMBOL_GPL(blk_add_request_payload);
1303
1304static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1305 struct bio *bio)
1306{
1307 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1308
1309 if (!ll_back_merge_fn(q, req, bio))
1310 return false;
1311
1312 trace_block_bio_backmerge(q, bio);
1313
1314 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1315 blk_rq_set_mixed_merge(req);
1316
1317 req->biotail->bi_next = bio;
1318 req->biotail = bio;
1319 req->__data_len += bio->bi_size;
1320 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1321
1322 drive_stat_acct(req, 0);
1323 return true;
1324}
1325
1326static bool bio_attempt_front_merge(struct request_queue *q,
1327 struct request *req, struct bio *bio)
1328{
1329 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1330
1331 if (!ll_front_merge_fn(q, req, bio))
1332 return false;
1333
1334 trace_block_bio_frontmerge(q, bio);
1335
1336 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1337 blk_rq_set_mixed_merge(req);
1338
1339 bio->bi_next = req->bio;
1340 req->bio = bio;
1341
1342 /*
1343 * may not be valid. if the low level driver said
1344 * it didn't need a bounce buffer then it better
1345 * not touch req->buffer either...
1346 */
1347 req->buffer = bio_data(bio);
1348 req->__sector = bio->bi_sector;
1349 req->__data_len += bio->bi_size;
1350 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1351
1352 drive_stat_acct(req, 0);
1353 return true;
1354}
1355
1356/**
1357 * attempt_plug_merge - try to merge with %current's plugged list
1358 * @q: request_queue new bio is being queued at
1359 * @bio: new bio being queued
1360 * @request_count: out parameter for number of traversed plugged requests
1361 *
1362 * Determine whether @bio being queued on @q can be merged with a request
1363 * on %current's plugged list. Returns %true if merge was successful,
1364 * otherwise %false.
1365 *
1366 * Plugging coalesces IOs from the same issuer for the same purpose without
1367 * going through @q->queue_lock. As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point. In addition, we don't have
1370 * reliable access to the elevator outside queue lock. Only check basic
1371 * merging parameters without querying the elevator.
1372 */
1373static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
1374 unsigned int *request_count)
1375{
1376 struct blk_plug *plug;
1377 struct request *rq;
1378 bool ret = false;
1379
1380 plug = current->plug;
1381 if (!plug)
1382 goto out;
1383 *request_count = 0;
1384
1385 list_for_each_entry_reverse(rq, &plug->list, queuelist) {
1386 int el_ret;
1387
1388 if (rq->q == q)
1389 (*request_count)++;
1390
1391 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1392 continue;
1393
1394 el_ret = blk_try_merge(rq, bio);
1395 if (el_ret == ELEVATOR_BACK_MERGE) {
1396 ret = bio_attempt_back_merge(q, rq, bio);
1397 if (ret)
1398 break;
1399 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1400 ret = bio_attempt_front_merge(q, rq, bio);
1401 if (ret)
1402 break;
1403 }
1404 }
1405out:
1406 return ret;
1407}
1408
1409void init_request_from_bio(struct request *req, struct bio *bio)
1410{
1411 req->cmd_type = REQ_TYPE_FS;
1412
1413 req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
1414 if (bio->bi_rw & REQ_RAHEAD)
1415 req->cmd_flags |= REQ_FAILFAST_MASK;
1416
1417 req->errors = 0;
1418 req->__sector = bio->bi_sector;
1419 req->ioprio = bio_prio(bio);
1420 blk_rq_bio_prep(req->q, req, bio);
1421}
1422
1423void blk_queue_bio(struct request_queue *q, struct bio *bio)
1424{
1425 const bool sync = !!(bio->bi_rw & REQ_SYNC);
1426 struct blk_plug *plug;
1427 int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
1428 struct request *req;
1429 unsigned int request_count = 0;
1430
1431 /*
1432 * low level driver can indicate that it wants pages above a
1433 * certain limit bounced to low memory (ie for highmem, or even
1434 * ISA dma in theory)
1435 */
1436 blk_queue_bounce(q, &bio);
1437
1438 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
1439 spin_lock_irq(q->queue_lock);
1440 where = ELEVATOR_INSERT_FLUSH;
1441 goto get_rq;
1442 }
1443
1444 /*
1445 * Check if we can merge with the plugged list before grabbing
1446 * any locks.
1447 */
1448 if (attempt_plug_merge(q, bio, &request_count))
1449 return;
1450
1451 spin_lock_irq(q->queue_lock);
1452
1453 el_ret = elv_merge(q, &req, bio);
1454 if (el_ret == ELEVATOR_BACK_MERGE) {
1455 if (bio_attempt_back_merge(q, req, bio)) {
1456 elv_bio_merged(q, req, bio);
1457 if (!attempt_back_merge(q, req))
1458 elv_merged_request(q, req, el_ret);
1459 goto out_unlock;
1460 }
1461 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1462 if (bio_attempt_front_merge(q, req, bio)) {
1463 elv_bio_merged(q, req, bio);
1464 if (!attempt_front_merge(q, req))
1465 elv_merged_request(q, req, el_ret);
1466 goto out_unlock;
1467 }
1468 }
1469
1470get_rq:
1471 /*
1472 * This sync check and mask will be re-done in init_request_from_bio(),
1473 * but we need to set it earlier to expose the sync flag to the
1474 * rq allocator and io schedulers.
1475 */
1476 rw_flags = bio_data_dir(bio);
1477 if (sync)
1478 rw_flags |= REQ_SYNC;
1479
1480 /*
 * Grab a free request. This may sleep but cannot fail.
1482 * Returns with the queue unlocked.
1483 */
1484 req = get_request_wait(q, rw_flags, bio);
1485 if (unlikely(!req)) {
1486 bio_endio(bio, -ENODEV); /* @q is dead */
1487 goto out_unlock;
1488 }
1489
1490 /*
1491 * After dropping the lock and possibly sleeping here, our request
1492 * may now be mergeable after it had proven unmergeable (above).
1493 * We don't worry about that case for efficiency. It won't happen
1494 * often, and the elevators are able to handle it.
1495 */
1496 init_request_from_bio(req, bio);
1497
1498 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
1499 req->cpu = raw_smp_processor_id();
1500
1501 plug = current->plug;
1502 if (plug) {
1503 /*
 * If this is the first request added after a plug, fire
 * off a plug trace. If others have been added before, check
1506 * if we have multiple devices in this plug. If so, make a
1507 * note to sort the list before dispatch.
1508 */
1509 if (list_empty(&plug->list))
1510 trace_block_plug(q);
1511 else {
1512 if (!plug->should_sort) {
1513 struct request *__rq;
1514
1515 __rq = list_entry_rq(plug->list.prev);
1516 if (__rq->q != q)
1517 plug->should_sort = 1;
1518 }
1519 if (request_count >= BLK_MAX_REQUEST_COUNT) {
1520 blk_flush_plug_list(plug, false);
1521 trace_block_plug(q);
1522 }
1523 }
1524 list_add_tail(&req->queuelist, &plug->list);
1525 drive_stat_acct(req, 1);
1526 } else {
1527 spin_lock_irq(q->queue_lock);
1528 add_acct_request(q, req, where);
1529 __blk_run_queue(q);
1530out_unlock:
1531 spin_unlock_irq(q->queue_lock);
1532 }
1533}
1534EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
1535
1536/*
1537 * If bio->bi_dev is a partition, remap the location
1538 */
1539static inline void blk_partition_remap(struct bio *bio)
1540{
1541 struct block_device *bdev = bio->bi_bdev;
1542
1543 if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1544 struct hd_struct *p = bdev->bd_part;
1545
1546 bio->bi_sector += p->start_sect;
1547 bio->bi_bdev = bdev->bd_contains;
1548
1549 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1550 bdev->bd_dev,
1551 bio->bi_sector - p->start_sect);
1552 }
1553}
1554
1555static void handle_bad_sector(struct bio *bio)
1556{
1557 char b[BDEVNAME_SIZE];
1558
1559 printk(KERN_INFO "attempt to access beyond end of device\n");
1560 printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
1561 bdevname(bio->bi_bdev, b),
1562 bio->bi_rw,
1563 (unsigned long long)bio->bi_sector + bio_sectors(bio),
1564 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1565
1566 set_bit(BIO_EOF, &bio->bi_flags);
1567}
1568
1569#ifdef CONFIG_FAIL_MAKE_REQUEST
1570
1571static DECLARE_FAULT_ATTR(fail_make_request);
1572
1573static int __init setup_fail_make_request(char *str)
1574{
1575 return setup_fault_attr(&fail_make_request, str);
1576}
1577__setup("fail_make_request=", setup_fail_make_request);
1578
1579static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
1580{
1581 return part->make_it_fail && should_fail(&fail_make_request, bytes);
1582}
1583
1584static int __init fail_make_request_debugfs(void)
1585{
1586 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1587 NULL, &fail_make_request);
1588
1589 return IS_ERR(dir) ? PTR_ERR(dir) : 0;
1590}
1591
1592late_initcall(fail_make_request_debugfs);
1593
1594#else /* CONFIG_FAIL_MAKE_REQUEST */
1595
1596static inline bool should_fail_request(struct hd_struct *part,
1597 unsigned int bytes)
1598{
1599 return false;
1600}
1601
1602#endif /* CONFIG_FAIL_MAKE_REQUEST */
1603
1604/*
1605 * Check whether this bio extends beyond the end of the device.
1606 */
1607static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1608{
1609 sector_t maxsector;
1610
1611 if (!nr_sectors)
1612 return 0;
1613
1614 /* Test device or partition size, when known. */
1615 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1616 if (maxsector) {
1617 sector_t sector = bio->bi_sector;
1618
1619 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1620 /*
1621 * This may well happen - the kernel calls bread()
1622 * without checking the size of the device, e.g., when
1623 * mounting a device.
1624 */
1625 handle_bad_sector(bio);
1626 return 1;
1627 }
1628 }
1629
1630 return 0;
1631}
1632
1633static noinline_for_stack bool
1634generic_make_request_checks(struct bio *bio)
1635{
1636 struct request_queue *q;
1637 int nr_sectors = bio_sectors(bio);
1638 int err = -EIO;
1639 char b[BDEVNAME_SIZE];
1640 struct hd_struct *part;
1641
1642 might_sleep();
1643
1644 if (bio_check_eod(bio, nr_sectors))
1645 goto end_io;
1646
1647 q = bdev_get_queue(bio->bi_bdev);
1648 if (unlikely(!q)) {
1649 printk(KERN_ERR
1650 "generic_make_request: Trying to access "
1651 "nonexistent block-device %s (%Lu)\n",
1652 bdevname(bio->bi_bdev, b),
1653 (long long) bio->bi_sector);
1654 goto end_io;
1655 }
1656
1657 if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
1658 nr_sectors > queue_max_hw_sectors(q))) {
1659 printk(KERN_ERR "bio too big device %s (%u > %u)\n",
1660 bdevname(bio->bi_bdev, b),
1661 bio_sectors(bio),
1662 queue_max_hw_sectors(q));
1663 goto end_io;
1664 }
1665
1666 part = bio->bi_bdev->bd_part;
1667 if (should_fail_request(part, bio->bi_size) ||
1668 should_fail_request(&part_to_disk(part)->part0,
1669 bio->bi_size))
1670 goto end_io;
1671
1672 /*
1673 * If this device has partitions, remap block n
1674 * of partition p to block n+start(p) of the disk.
1675 */
1676 blk_partition_remap(bio);
1677
1678 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
1679 goto end_io;
1680
1681 if (bio_check_eod(bio, nr_sectors))
1682 goto end_io;
1683
1684 /*
1685 * Filter flush bio's early so that make_request based
1686 * drivers without flush support don't have to worry
1687 * about them.
1688 */
1689 if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
1690 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
1691 if (!nr_sectors) {
1692 err = 0;
1693 goto end_io;
1694 }
1695 }
1696
1697 if ((bio->bi_rw & REQ_DISCARD) &&
1698 (!blk_queue_discard(q) ||
1699 ((bio->bi_rw & REQ_SECURE) &&
1700 !blk_queue_secdiscard(q)))) {
1701 err = -EOPNOTSUPP;
1702 goto end_io;
1703 }
1704
1705 if (blk_throtl_bio(q, bio))
1706 return false; /* throttled, will be resubmitted later */
1707
1708 trace_block_bio_queue(q, bio);
1709 return true;
1710
1711end_io:
1712 bio_endio(bio, err);
1713 return false;
1714}
1715
1716/**
1717 * generic_make_request - hand a buffer to its device driver for I/O
1718 * @bio: The bio describing the location in memory and on the device.
1719 *
1720 * generic_make_request() is used to make I/O requests of block
1721 * devices. It is passed a &struct bio, which describes the I/O that needs
1722 * to be done.
1723 *
1724 * generic_make_request() does not return any status. The
1725 * success/failure status of the request, along with notification of
1726 * completion, is delivered asynchronously through the bio->bi_end_io
 * function described (one day) elsewhere.
1728 *
1729 * The caller of generic_make_request must make sure that bi_io_vec
1730 * are set to describe the memory buffer, and that bi_dev and bi_sector are
1731 * set to describe the device address, and the
1732 * bi_end_io and optionally bi_private are set to describe how
1733 * completion notification should be signaled.
1734 *
1735 * generic_make_request and the drivers it calls may use bi_next if this
1736 * bio happens to be merged with someone else, and may resubmit the bio to
1737 * a lower device by calling into generic_make_request recursively, which
1738 * means the bio should NOT be touched after the call to ->make_request_fn.
1739 */
1740void generic_make_request(struct bio *bio)
1741{
1742 struct bio_list bio_list_on_stack;
1743
1744 if (!generic_make_request_checks(bio))
1745 return;
1746
1747 /*
1748 * We only want one ->make_request_fn to be active at a time, else
1749 * stack usage with stacked devices could be a problem. So use
 * current->bio_list to keep a list of requests submitted by a
1751 * make_request_fn function. current->bio_list is also used as a
1752 * flag to say if generic_make_request is currently active in this
1753 * task or not. If it is NULL, then no make_request is active. If
1754 * it is non-NULL, then a make_request is active, and new requests
1755 * should be added at the tail
1756 */
1757 if (current->bio_list) {
1758 bio_list_add(current->bio_list, bio);
1759 return;
1760 }
1761
1762 /* following loop may be a bit non-obvious, and so deserves some
1763 * explanation.
1764 * Before entering the loop, bio->bi_next is NULL (as all callers
1765 * ensure that) so we have a list with a single bio.
1766 * We pretend that we have just taken it off a longer list, so
1767 * we assign bio_list to a pointer to the bio_list_on_stack,
1768 * thus initialising the bio_list of new bios to be
1769 * added. ->make_request() may indeed add some more bios
1770 * through a recursive call to generic_make_request. If it
1771 * did, we find a non-NULL value in bio_list and re-enter the loop
1772 * from the top. In this case we really did just take the bio
 * off the top of the list (no pretending) and so remove it from
1774 * bio_list, and call into ->make_request() again.
1775 */
1776 BUG_ON(bio->bi_next);
1777 bio_list_init(&bio_list_on_stack);
1778 current->bio_list = &bio_list_on_stack;
1779 do {
1780 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
1781
1782 q->make_request_fn(q, bio);
1783
1784 bio = bio_list_pop(current->bio_list);
1785 } while (bio);
1786 current->bio_list = NULL; /* deactivate */
1787}
1788EXPORT_SYMBOL(generic_make_request);
1789
1790/**
1791 * submit_bio - submit a bio to the block device layer for I/O
1792 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
1793 * @bio: The &struct bio which describes the I/O
1794 *
1795 * submit_bio() is very similar in purpose to generic_make_request(), and
1796 * uses that function to do most of the work. Both are fairly rough
1797 * interfaces; @bio must be presetup and ready for I/O.
1798 *
1799 */
1800void submit_bio(int rw, struct bio *bio)
1801{
1802 int count = bio_sectors(bio);
1803
1804 bio->bi_rw |= rw;
1805
1806 /*
1807 * If it's a regular read/write or a barrier with data attached,
1808 * go through the normal accounting stuff before submission.
1809 */
1810 if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
1811 if (rw & WRITE) {
1812 count_vm_events(PGPGOUT, count);
1813 } else {
1814 task_io_account_read(bio->bi_size);
1815 count_vm_events(PGPGIN, count);
1816 }
1817
1818 if (unlikely(block_dump)) {
1819 char b[BDEVNAME_SIZE];
1820 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1821 current->comm, task_pid_nr(current),
1822 (rw & WRITE) ? "WRITE" : "READ",
1823 (unsigned long long)bio->bi_sector,
1824 bdevname(bio->bi_bdev, b),
1825 count);
1826 }
1827 }
1828
1829 generic_make_request(bio);
1830}
1831EXPORT_SYMBOL(submit_bio);
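
/*
 * Minimal sketch of building and submitting a bio for a single-page read;
 * "my_end_io" and "my_cookie" are placeholders supplied by the caller:
 *
 *        bio = bio_alloc(GFP_NOIO, 1);
 *        bio->bi_bdev = bdev;
 *        bio->bi_sector = sector;
 *        bio->bi_end_io = my_end_io;
 *        bio->bi_private = my_cookie;
 *        bio_add_page(bio, page, PAGE_SIZE, 0);
 *        submit_bio(READ, bio);
 *
 * Completion, success or failure, is reported through my_end_io(); the
 * caller must not touch the bio again until that callback runs.
 */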
1832
1833/**
1834 * blk_rq_check_limits - Helper function to check a request for the queue limit
1835 * @q: the queue
1836 * @rq: the request being checked
1837 *
1838 * Description:
1839 * @rq may have been made based on weaker limitations of upper-level queues
1840 * in request stacking drivers, and it may violate the limitation of @q.
1841 * Since the block layer and the underlying device driver trust @rq
1842 * after it is inserted to @q, it should be checked against @q before
1843 * the insertion using this generic function.
1844 *
1845 * This function should also be useful for request stacking drivers
1846 * in some cases below, so export this function.
1847 * Request stacking drivers like request-based dm may change the queue
1848 * limits while requests are in the queue (e.g. dm's table swapping).
 * Such request stacking drivers should check those requests against
 * the new queue limits again when they dispatch those requests,
 * although such checks are also done against the old queue limits
1852 * when submitting requests.
1853 */
1854int blk_rq_check_limits(struct request_queue *q, struct request *rq)
1855{
1856 if (rq->cmd_flags & REQ_DISCARD)
1857 return 0;
1858
1859 if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
1860 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
1861 printk(KERN_ERR "%s: over max size limit.\n", __func__);
1862 return -EIO;
1863 }
1864
1865 /*
1866 * queue's settings related to segment counting like q->bounce_pfn
1867 * may differ from that of other stacking queues.
1868 * Recalculate it to check the request correctly on this queue's
1869 * limitation.
1870 */
1871 blk_recalc_rq_segments(rq);
1872 if (rq->nr_phys_segments > queue_max_segments(q)) {
1873 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
1874 return -EIO;
1875 }
1876
1877 return 0;
1878}
1879EXPORT_SYMBOL_GPL(blk_rq_check_limits);
1880
1881/**
1882 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
1883 * @q: the queue to submit the request
1884 * @rq: the request being queued
1885 */
1886int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
1887{
1888 unsigned long flags;
1889 int where = ELEVATOR_INSERT_BACK;
1890
1891 if (blk_rq_check_limits(q, rq))
1892 return -EIO;
1893
1894 if (rq->rq_disk &&
1895 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
1896 return -EIO;
1897
1898 spin_lock_irqsave(q->queue_lock, flags);
1899 if (unlikely(blk_queue_dead(q))) {
1900 spin_unlock_irqrestore(q->queue_lock, flags);
1901 return -ENODEV;
1902 }
1903
1904 /*
1905	 * The request being submitted must already be dequeued before calling
1906	 * this function, because it will be linked to another request_queue.
1907 */
1908 BUG_ON(blk_queued_rq(rq));
1909
1910 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA))
1911 where = ELEVATOR_INSERT_FLUSH;
1912
1913 add_acct_request(q, rq, where);
1914 if (where == ELEVATOR_INSERT_FLUSH)
1915 __blk_run_queue(q);
1916 spin_unlock_irqrestore(q->queue_lock, flags);
1917
1918 return 0;
1919}
1920EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
1921
1922/**
1923 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
1924 * @rq: request to examine
1925 *
1926 * Description:
1927 *     A request could be a merge of IOs which require different failure
1928 *     handling.  This function determines the number of bytes which
1929 *     can be failed from the beginning of the request without
1930 *     crossing into an area which needs to be retried further.
1931 *
1932 * Return:
1933 * The number of bytes to fail.
1934 *
1935 * Context:
1936 * queue_lock must be held.
1937 */
1938unsigned int blk_rq_err_bytes(const struct request *rq)
1939{
1940 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
1941 unsigned int bytes = 0;
1942 struct bio *bio;
1943
1944 if (!(rq->cmd_flags & REQ_MIXED_MERGE))
1945 return blk_rq_bytes(rq);
1946
1947 /*
1948	 * Currently the only 'mixing' which can happen is between
1949	 * different failfast types.  We can safely fail portions
1950	 * which have all the failfast bits that the first one has -
1951	 * i.e. the ones which are at least as eager to fail as the
1952	 * first one.
1953 */
1954 for (bio = rq->bio; bio; bio = bio->bi_next) {
1955 if ((bio->bi_rw & ff) != ff)
1956 break;
1957 bytes += bio->bi_size;
1958 }
1959
1960 /* this could lead to infinite loop */
1961 BUG_ON(blk_rq_bytes(rq) && !bytes);
1962 return bytes;
1963}
1964EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1965
1966static void blk_account_io_completion(struct request *req, unsigned int bytes)
1967{
1968 if (blk_do_io_stat(req)) {
1969 const int rw = rq_data_dir(req);
1970 struct hd_struct *part;
1971 int cpu;
1972
1973 cpu = part_stat_lock();
1974 part = req->part;
1975 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
1976 part_stat_unlock();
1977 }
1978}
1979
1980static void blk_account_io_done(struct request *req)
1981{
1982 /*
1983 * Account IO completion. flush_rq isn't accounted as a
1984 * normal IO on queueing nor completion. Accounting the
1985 * containing request is enough.
1986 */
1987 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
1988 unsigned long duration = jiffies - req->start_time;
1989 const int rw = rq_data_dir(req);
1990 struct hd_struct *part;
1991 int cpu;
1992
1993 cpu = part_stat_lock();
1994 part = req->part;
1995
1996 part_stat_inc(cpu, part, ios[rw]);
1997 part_stat_add(cpu, part, ticks[rw], duration);
1998 part_round_stats(cpu, part);
1999 part_dec_in_flight(part, rw);
2000
2001 hd_struct_put(part);
2002 part_stat_unlock();
2003 }
2004}
2005
2006/**
2007 * blk_peek_request - peek at the top of a request queue
2008 * @q: request queue to peek at
2009 *
2010 * Description:
2011 * Return the request at the top of @q. The returned request
2012 * should be started using blk_start_request() before LLD starts
2013 * processing it.
2014 *
2015 * Return:
2016 * Pointer to the request at the top of @q if available. Null
2017 * otherwise.
2018 *
2019 * Context:
2020 * queue_lock must be held.
2021 */
2022struct request *blk_peek_request(struct request_queue *q)
2023{
2024 struct request *rq;
2025 int ret;
2026
2027 while ((rq = __elv_next_request(q)) != NULL) {
2028 if (!(rq->cmd_flags & REQ_STARTED)) {
2029 /*
2030 * This is the first time the device driver
2031 * sees this request (possibly after
2032 * requeueing). Notify IO scheduler.
2033 */
2034 if (rq->cmd_flags & REQ_SORTED)
2035 elv_activate_rq(q, rq);
2036
2037 /*
2038			 * Just mark it as started even if we don't start
2039			 * it: a request that has been delayed should not
2040			 * be passed by newly incoming requests.
2041 */
2042 rq->cmd_flags |= REQ_STARTED;
2043 trace_block_rq_issue(q, rq);
2044 }
2045
2046 if (!q->boundary_rq || q->boundary_rq == rq) {
2047 q->end_sector = rq_end_sector(rq);
2048 q->boundary_rq = NULL;
2049 }
2050
2051 if (rq->cmd_flags & REQ_DONTPREP)
2052 break;
2053
2054 if (q->dma_drain_size && blk_rq_bytes(rq)) {
2055 /*
2056			 * Make sure space for the drain appears.  We
2057			 * know we can do this because max_hw_segments
2058			 * has been adjusted to be one fewer than the
2059			 * device can handle.
2060 */
2061 rq->nr_phys_segments++;
2062 }
2063
2064 if (!q->prep_rq_fn)
2065 break;
2066
2067 ret = q->prep_rq_fn(q, rq);
2068 if (ret == BLKPREP_OK) {
2069 break;
2070 } else if (ret == BLKPREP_DEFER) {
2071 /*
2072 * the request may have been (partially) prepped.
2073 * we need to keep this request in the front to
2074 * avoid resource deadlock. REQ_STARTED will
2075 * prevent other fs requests from passing this one.
2076 */
2077 if (q->dma_drain_size && blk_rq_bytes(rq) &&
2078 !(rq->cmd_flags & REQ_DONTPREP)) {
2079 /*
2080 * remove the space for the drain we added
2081 * so that we don't add it again
2082 */
2083 --rq->nr_phys_segments;
2084 }
2085
2086 rq = NULL;
2087 break;
2088 } else if (ret == BLKPREP_KILL) {
2089 rq->cmd_flags |= REQ_QUIET;
2090 /*
2091 * Mark this request as started so we don't trigger
2092 * any debug logic in the end I/O path.
2093 */
2094 blk_start_request(rq);
2095 __blk_end_request_all(rq, -EIO);
2096 } else {
2097 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2098 break;
2099 }
2100 }
2101
2102 return rq;
2103}
2104EXPORT_SYMBOL(blk_peek_request);
2105
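/*
 * Editor's illustrative sketch (not part of the original file): a request_fn
 * that peeks at the queue and only starts a request once a (hypothetical)
 * device resource is available, leaving it queued otherwise.  request_fn is
 * invoked with the queue lock held, as blk_peek_request() expects.
 */
static int example_device_can_queue(void);		/* assumed resource check */
static void example_dispatch_to_hw(struct request *rq);	/* assumed h/w kick-off */

static void example_peek_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (!example_device_can_queue())
			break;		/* leave rq at the head; run the queue later */

		blk_start_request(rq);	/* dequeue and arm the timeout */
		example_dispatch_to_hw(rq);
	}
}
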
2106void blk_dequeue_request(struct request *rq)
2107{
2108 struct request_queue *q = rq->q;
2109
2110 BUG_ON(list_empty(&rq->queuelist));
2111 BUG_ON(ELV_ON_HASH(rq));
2112
2113 list_del_init(&rq->queuelist);
2114
2115 /*
2116	 * The time frame between a request being removed from the lists
2117	 * and when it is freed is accounted as I/O that is in progress on
2118	 * the driver side.
2119 */
2120 if (blk_account_rq(rq)) {
2121 q->in_flight[rq_is_sync(rq)]++;
2122 set_io_start_time_ns(rq);
2123 }
2124}
2125
2126/**
2127 * blk_start_request - start request processing on the driver
2128 * @req: request to dequeue
2129 *
2130 * Description:
2131 * Dequeue @req and start timeout timer on it. This hands off the
2132 * request to the driver.
2133 *
2134 * Block internal functions which don't want to start timer should
2135 * call blk_dequeue_request().
2136 *
2137 * Context:
2138 * queue_lock must be held.
2139 */
2140void blk_start_request(struct request *req)
2141{
2142 blk_dequeue_request(req);
2143
2144 /*
2145 * We are now handing the request to the hardware, initialize
2146 * resid_len to full count and add the timeout handler.
2147 */
2148 req->resid_len = blk_rq_bytes(req);
2149 if (unlikely(blk_bidi_rq(req)))
2150 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2151
2152 blk_add_timer(req);
2153}
2154EXPORT_SYMBOL(blk_start_request);
2155
2156/**
2157 * blk_fetch_request - fetch a request from a request queue
2158 * @q: request queue to fetch a request from
2159 *
2160 * Description:
2161 * Return the request at the top of @q. The request is started on
2162 * return and LLD can start processing it immediately.
2163 *
2164 * Return:
2165 * Pointer to the request at the top of @q if available. Null
2166 * otherwise.
2167 *
2168 * Context:
2169 * queue_lock must be held.
2170 */
2171struct request *blk_fetch_request(struct request_queue *q)
2172{
2173 struct request *rq;
2174
2175 rq = blk_peek_request(q);
2176 if (rq)
2177 blk_start_request(rq);
2178 return rq;
2179}
2180EXPORT_SYMBOL(blk_fetch_request);
2181
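/*
 * Editor's illustrative sketch (not part of the original file): the simplest
 * possible request_fn, fetching requests one by one and completing them
 * immediately, in the style of a trivial memory-backed driver.  The actual
 * data transfer step is a hypothetical placeholder.
 */
static void example_transfer_data(struct request *rq);	/* assumed data mover */

static void example_fetch_request_fn(struct request_queue *q)
{
	struct request *rq;

	/* the queue lock is held; blk_fetch_request() also starts the request */
	while ((rq = blk_fetch_request(q)) != NULL) {
		example_transfer_data(rq);
		__blk_end_request_all(rq, 0);	/* complete with success */
	}
}
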
2182/**
2183 * blk_update_request - Special helper function for request stacking drivers
2184 * @req: the request being processed
2185 * @error: %0 for success, < %0 for error
2186 * @nr_bytes: number of bytes to complete @req
2187 *
2188 * Description:
2189 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2190 * the request structure even if @req doesn't have leftover.
2191 * If @req has leftover, sets it up for the next range of segments.
2192 *
2193 * This special helper function is only for request stacking drivers
2194 * (e.g. request-based dm) so that they can handle partial completion.
2195 * Actual device drivers should use blk_end_request instead.
2196 *
2197 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2198 * %false return from this function.
2199 *
2200 * Return:
2201 * %false - this request doesn't have any more data
2202 * %true - this request has more data
2203 **/
2204bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2205{
2206 int total_bytes, bio_nbytes, next_idx = 0;
2207 struct bio *bio;
2208
2209 if (!req->bio)
2210 return false;
2211
2212 trace_block_rq_complete(req->q, req);
2213
2214 /*
2215	 * For fs requests, rq is just a carrier of independent bios
2216 * and each partial completion should be handled separately.
2217 * Reset per-request error on each partial completion.
2218 *
2219 * TODO: tj: This is too subtle. It would be better to let
2220 * low level drivers do what they see fit.
2221 */
2222 if (req->cmd_type == REQ_TYPE_FS)
2223 req->errors = 0;
2224
2225 if (error && req->cmd_type == REQ_TYPE_FS &&
2226 !(req->cmd_flags & REQ_QUIET)) {
2227 char *error_type;
2228
2229 switch (error) {
2230 case -ENOLINK:
2231 error_type = "recoverable transport";
2232 break;
2233 case -EREMOTEIO:
2234 error_type = "critical target";
2235 break;
2236 case -EBADE:
2237 error_type = "critical nexus";
2238 break;
2239 case -EIO:
2240 default:
2241 error_type = "I/O";
2242 break;
2243 }
2244 printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n",
2245 error_type, req->rq_disk ? req->rq_disk->disk_name : "?",
2246 (unsigned long long)blk_rq_pos(req));
2247 }
2248
2249 blk_account_io_completion(req, nr_bytes);
2250
2251 total_bytes = bio_nbytes = 0;
2252 while ((bio = req->bio) != NULL) {
2253 int nbytes;
2254
2255 if (nr_bytes >= bio->bi_size) {
2256 req->bio = bio->bi_next;
2257 nbytes = bio->bi_size;
2258 req_bio_endio(req, bio, nbytes, error);
2259 next_idx = 0;
2260 bio_nbytes = 0;
2261 } else {
2262 int idx = bio->bi_idx + next_idx;
2263
2264 if (unlikely(idx >= bio->bi_vcnt)) {
2265 blk_dump_rq_flags(req, "__end_that");
2266 printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
2267 __func__, idx, bio->bi_vcnt);
2268 break;
2269 }
2270
2271 nbytes = bio_iovec_idx(bio, idx)->bv_len;
2272 BIO_BUG_ON(nbytes > bio->bi_size);
2273
2274 /*
2275 * not a complete bvec done
2276 */
2277 if (unlikely(nbytes > nr_bytes)) {
2278 bio_nbytes += nr_bytes;
2279 total_bytes += nr_bytes;
2280 break;
2281 }
2282
2283 /*
2284 * advance to the next vector
2285 */
2286 next_idx++;
2287 bio_nbytes += nbytes;
2288 }
2289
2290 total_bytes += nbytes;
2291 nr_bytes -= nbytes;
2292
2293 bio = req->bio;
2294 if (bio) {
2295 /*
2296 * end more in this run, or just return 'not-done'
2297 */
2298 if (unlikely(nr_bytes <= 0))
2299 break;
2300 }
2301 }
2302
2303 /*
2304 * completely done
2305 */
2306 if (!req->bio) {
2307 /*
2308 * Reset counters so that the request stacking driver
2309 * can find how many bytes remain in the request
2310 * later.
2311 */
2312 req->__data_len = 0;
2313 return false;
2314 }
2315
2316 /*
2317 * if the request wasn't completed, update state
2318 */
2319 if (bio_nbytes) {
2320 req_bio_endio(req, bio, bio_nbytes, error);
2321 bio->bi_idx += next_idx;
2322 bio_iovec(bio)->bv_offset += nr_bytes;
2323 bio_iovec(bio)->bv_len -= nr_bytes;
2324 }
2325
2326 req->__data_len -= total_bytes;
2327 req->buffer = bio_data(req->bio);
2328
2329 /* update sector only for requests with clear definition of sector */
2330 if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
2331 req->__sector += total_bytes >> 9;
2332
2333 /* mixed attributes always follow the first bio */
2334 if (req->cmd_flags & REQ_MIXED_MERGE) {
2335 req->cmd_flags &= ~REQ_FAILFAST_MASK;
2336 req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK;
2337 }
2338
2339 /*
2340 * If total number of sectors is less than the first segment
2341 * size, something has gone terribly wrong.
2342 */
2343 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2344 blk_dump_rq_flags(req, "request botched");
2345 req->__data_len = blk_rq_cur_bytes(req);
2346 }
2347
2348 /* recalculate the number of segments */
2349 blk_recalc_rq_segments(req);
2350
2351 return true;
2352}
2353EXPORT_SYMBOL_GPL(blk_update_request);
2354
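/*
 * Editor's illustrative sketch (not part of the original file): how a
 * request-stacking driver might propagate completion of @bytes bytes of its
 * clone back to the original request.  Finishing the original with
 * blk_end_request_all() once nothing is left is an assumption modelled on
 * request-based dm, not a requirement of this interface.
 */
static void example_stacking_complete(struct request *orig, int error,
				      unsigned int bytes)
{
	if (blk_update_request(orig, error, bytes))
		return;			/* the original still has data left */

	/* every byte is accounted for; now finish the original request */
	blk_end_request_all(orig, error);
}
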
2355static bool blk_update_bidi_request(struct request *rq, int error,
2356 unsigned int nr_bytes,
2357 unsigned int bidi_bytes)
2358{
2359 if (blk_update_request(rq, error, nr_bytes))
2360 return true;
2361
2362 /* Bidi request must be completed as a whole */
2363 if (unlikely(blk_bidi_rq(rq)) &&
2364 blk_update_request(rq->next_rq, error, bidi_bytes))
2365 return true;
2366
2367 if (blk_queue_add_random(rq->q))
2368 add_disk_randomness(rq->rq_disk);
2369
2370 return false;
2371}
2372
2373/**
2374 * blk_unprep_request - unprepare a request
2375 * @req: the request
2376 *
2377 * This function makes a request ready for complete resubmission (or
2378 * completion). It happens only after all error handling is complete,
2379 * so represents the appropriate moment to deallocate any resources
2380 * that were allocated to the request in the prep_rq_fn. The queue
2381 * lock is held when calling this.
2382 */
2383void blk_unprep_request(struct request *req)
2384{
2385 struct request_queue *q = req->q;
2386
2387 req->cmd_flags &= ~REQ_DONTPREP;
2388 if (q->unprep_rq_fn)
2389 q->unprep_rq_fn(q, req);
2390}
2391EXPORT_SYMBOL_GPL(blk_unprep_request);
2392
2393/*
2394 * queue lock must be held
2395 */
2396static void blk_finish_request(struct request *req, int error)
2397{
2398 if (blk_rq_tagged(req))
2399 blk_queue_end_tag(req->q, req);
2400
2401 BUG_ON(blk_queued_rq(req));
2402
2403 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2404 laptop_io_completion(&req->q->backing_dev_info);
2405
2406 blk_delete_timer(req);
2407
2408 if (req->cmd_flags & REQ_DONTPREP)
2409 blk_unprep_request(req);
2410
2411
2412 blk_account_io_done(req);
2413
2414 if (req->end_io)
2415 req->end_io(req, error);
2416 else {
2417 if (blk_bidi_rq(req))
2418 __blk_put_request(req->next_rq->q, req->next_rq);
2419
2420 __blk_put_request(req->q, req);
2421 }
2422}
2423
2424/**
2425 * blk_end_bidi_request - Complete a bidi request
2426 * @rq: the request to complete
2427 * @error: %0 for success, < %0 for error
2428 * @nr_bytes: number of bytes to complete @rq
2429 * @bidi_bytes: number of bytes to complete @rq->next_rq
2430 *
2431 * Description:
2432 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2433 *     Drivers that support bidi can safely call this member for any
2434 *     type of request, bidi or uni.  In the latter case @bidi_bytes is
2435 * just ignored.
2436 *
2437 * Return:
2438 * %false - we are done with this request
2439 * %true - still buffers pending for this request
2440 **/
2441static bool blk_end_bidi_request(struct request *rq, int error,
2442 unsigned int nr_bytes, unsigned int bidi_bytes)
2443{
2444 struct request_queue *q = rq->q;
2445 unsigned long flags;
2446
2447 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2448 return true;
2449
2450 spin_lock_irqsave(q->queue_lock, flags);
2451 blk_finish_request(rq, error);
2452 spin_unlock_irqrestore(q->queue_lock, flags);
2453
2454 return false;
2455}
2456
2457/**
2458 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2459 * @rq: the request to complete
2460 * @error: %0 for success, < %0 for error
2461 * @nr_bytes: number of bytes to complete @rq
2462 * @bidi_bytes: number of bytes to complete @rq->next_rq
2463 *
2464 * Description:
2465 * Identical to blk_end_bidi_request() except that queue lock is
2466 * assumed to be locked on entry and remains so on return.
2467 *
2468 * Return:
2469 * %false - we are done with this request
2470 * %true - still buffers pending for this request
2471 **/
2472bool __blk_end_bidi_request(struct request *rq, int error,
2473 unsigned int nr_bytes, unsigned int bidi_bytes)
2474{
2475 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2476 return true;
2477
2478 blk_finish_request(rq, error);
2479
2480 return false;
2481}
2482
2483/**
2484 * blk_end_request - Helper function for drivers to complete the request.
2485 * @rq: the request being processed
2486 * @error: %0 for success, < %0 for error
2487 * @nr_bytes: number of bytes to complete
2488 *
2489 * Description:
2490 * Ends I/O on a number of bytes attached to @rq.
2491 * If @rq has leftover, sets it up for the next range of segments.
2492 *
2493 * Return:
2494 * %false - we are done with this request
2495 * %true - still buffers pending for this request
2496 **/
2497bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2498{
2499 return blk_end_bidi_request(rq, error, nr_bytes, 0);
2500}
2501EXPORT_SYMBOL(blk_end_request);
2502
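/*
 * Editor's illustrative sketch (not part of the original file): a driver
 * completion path that ends however many bytes the hardware reported and
 * leaves the request set up for the remainder otherwise.  The byte count is
 * assumed to come from a driver-specific completion event.
 */
static void example_complete_bytes(struct request *rq, int error,
				   unsigned int done_bytes)
{
	/* queue lock must not be held here; otherwise use __blk_end_request() */
	if (blk_end_request(rq, error, done_bytes))
		return;		/* partial completion: rq continues with the rest */

	/* rq was fully completed and has been released by the block layer */
}
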
2503/**
2504 * blk_end_request_all - Helper function for drivers to finish the request.
2505 * @rq: the request to finish
2506 * @error: %0 for success, < %0 for error
2507 *
2508 * Description:
2509 * Completely finish @rq.
2510 */
2511void blk_end_request_all(struct request *rq, int error)
2512{
2513 bool pending;
2514 unsigned int bidi_bytes = 0;
2515
2516 if (unlikely(blk_bidi_rq(rq)))
2517 bidi_bytes = blk_rq_bytes(rq->next_rq);
2518
2519 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2520 BUG_ON(pending);
2521}
2522EXPORT_SYMBOL(blk_end_request_all);
2523
2524/**
2525 * blk_end_request_cur - Helper function to finish the current request chunk.
2526 * @rq: the request to finish the current chunk for
2527 * @error: %0 for success, < %0 for error
2528 *
2529 * Description:
2530 * Complete the current consecutively mapped chunk from @rq.
2531 *
2532 * Return:
2533 * %false - we are done with this request
2534 * %true - still buffers pending for this request
2535 */
2536bool blk_end_request_cur(struct request *rq, int error)
2537{
2538 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2539}
2540EXPORT_SYMBOL(blk_end_request_cur);
2541
2542/**
2543 * blk_end_request_err - Finish a request till the next failure boundary.
2544 * @rq: the request to finish till the next failure boundary for
2545 * @error: must be negative errno
2546 *
2547 * Description:
2548 * Complete @rq till the next failure boundary.
2549 *
2550 * Return:
2551 * %false - we are done with this request
2552 * %true - still buffers pending for this request
2553 */
2554bool blk_end_request_err(struct request *rq, int error)
2555{
2556 WARN_ON(error >= 0);
2557 return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2558}
2559EXPORT_SYMBOL_GPL(blk_end_request_err);
2560
2561/**
2562 * __blk_end_request - Helper function for drivers to complete the request.
2563 * @rq: the request being processed
2564 * @error: %0 for success, < %0 for error
2565 * @nr_bytes: number of bytes to complete
2566 *
2567 * Description:
2568 * Must be called with queue lock held unlike blk_end_request().
2569 *
2570 * Return:
2571 * %false - we are done with this request
2572 * %true - still buffers pending for this request
2573 **/
2574bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2575{
2576 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2577}
2578EXPORT_SYMBOL(__blk_end_request);
2579
2580/**
2581 * __blk_end_request_all - Helper function for drivers to finish the request.
2582 * @rq: the request to finish
2583 * @error: %0 for success, < %0 for error
2584 *
2585 * Description:
2586 * Completely finish @rq. Must be called with queue lock held.
2587 */
2588void __blk_end_request_all(struct request *rq, int error)
2589{
2590 bool pending;
2591 unsigned int bidi_bytes = 0;
2592
2593 if (unlikely(blk_bidi_rq(rq)))
2594 bidi_bytes = blk_rq_bytes(rq->next_rq);
2595
2596 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2597 BUG_ON(pending);
2598}
2599EXPORT_SYMBOL(__blk_end_request_all);
2600
2601/**
2602 * __blk_end_request_cur - Helper function to finish the current request chunk.
2603 * @rq: the request to finish the current chunk for
2604 * @error: %0 for success, < %0 for error
2605 *
2606 * Description:
2607 * Complete the current consecutively mapped chunk from @rq. Must
2608 * be called with queue lock held.
2609 *
2610 * Return:
2611 * %false - we are done with this request
2612 * %true - still buffers pending for this request
2613 */
2614bool __blk_end_request_cur(struct request *rq, int error)
2615{
2616 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2617}
2618EXPORT_SYMBOL(__blk_end_request_cur);
2619
2620/**
2621 * __blk_end_request_err - Finish a request till the next failure boundary.
2622 * @rq: the request to finish till the next failure boundary for
2623 * @error: must be negative errno
2624 *
2625 * Description:
2626 * Complete @rq till the next failure boundary. Must be called
2627 * with queue lock held.
2628 *
2629 * Return:
2630 * %false - we are done with this request
2631 * %true - still buffers pending for this request
2632 */
2633bool __blk_end_request_err(struct request *rq, int error)
2634{
2635 WARN_ON(error >= 0);
2636 return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2637}
2638EXPORT_SYMBOL_GPL(__blk_end_request_err);
2639
2640void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2641 struct bio *bio)
2642{
2643 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
2644 rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
2645
2646 if (bio_has_data(bio)) {
2647 rq->nr_phys_segments = bio_phys_segments(q, bio);
2648 rq->buffer = bio_data(bio);
2649 }
2650 rq->__data_len = bio->bi_size;
2651 rq->bio = rq->biotail = bio;
2652
2653 if (bio->bi_bdev)
2654 rq->rq_disk = bio->bi_bdev->bd_disk;
2655}
2656
2657#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2658/**
2659 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2660 * @rq: the request to be flushed
2661 *
2662 * Description:
2663 * Flush all pages in @rq.
2664 */
2665void rq_flush_dcache_pages(struct request *rq)
2666{
2667 struct req_iterator iter;
2668 struct bio_vec *bvec;
2669
2670 rq_for_each_segment(bvec, rq, iter)
2671 flush_dcache_page(bvec->bv_page);
2672}
2673EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2674#endif
2675
2676/**
2677 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2678 * @q : the queue of the device being checked
2679 *
2680 * Description:
2681 * Check if underlying low-level drivers of a device are busy.
2682 *     If the drivers want to export their busy state, they must set their
2683 *     own exporting function using blk_queue_lld_busy() first.
2684 *
2685 * Basically, this function is used only by request stacking drivers
2686 * to stop dispatching requests to underlying devices when underlying
2687 * devices are busy. This behavior helps more I/O merging on the queue
2688 *     devices are busy.  This behavior allows more I/O merging on the queue
2689 *     of the request stacking driver and prevents I/O throughput regressions
2690 *     under bursty I/O load.
2691 * Return:
2692 * 0 - Not busy (The request stacking driver should dispatch request)
2693 * 1 - Busy (The request stacking driver should stop dispatching request)
2694 */
2695int blk_lld_busy(struct request_queue *q)
2696{
2697 if (q->lld_busy_fn)
2698 return q->lld_busy_fn(q);
2699
2700 return 0;
2701}
2702EXPORT_SYMBOL_GPL(blk_lld_busy);
2703
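/*
 * Editor's illustrative sketch (not part of the original file): an underlying
 * driver exports its busy state with blk_queue_lld_busy(), and a stacking
 * driver consults blk_lld_busy() on the lower queue before dispatching.
 * The in-flight threshold used as the notion of "busy" is an assumption.
 */
static int example_lld_busy_fn(struct request_queue *q)
{
	return queue_in_flight(q) >= 32;	/* assumed congestion limit */
}

/* low-level driver: register the hook once when setting up its queue */
static void example_register_busy_hook(struct request_queue *lower_q)
{
	blk_queue_lld_busy(lower_q, example_lld_busy_fn);
}

/* stacking driver: back off instead of dispatching to a busy lower device */
static bool example_lower_device_busy(struct request_queue *lower_q)
{
	return blk_lld_busy(lower_q) != 0;
}
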
2704/**
2705 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
2706 * @rq: the clone request to be cleaned up
2707 *
2708 * Description:
2709 * Free all bios in @rq for a cloned request.
2710 */
2711void blk_rq_unprep_clone(struct request *rq)
2712{
2713 struct bio *bio;
2714
2715 while ((bio = rq->bio) != NULL) {
2716 rq->bio = bio->bi_next;
2717
2718 bio_put(bio);
2719 }
2720}
2721EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
2722
2723/*
2724 * Copy attributes of the original request to the clone request.
2725 * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
2726 */
2727static void __blk_rq_prep_clone(struct request *dst, struct request *src)
2728{
2729 dst->cpu = src->cpu;
2730 dst->cmd_flags = (src->cmd_flags & REQ_CLONE_MASK) | REQ_NOMERGE;
2731 dst->cmd_type = src->cmd_type;
2732 dst->__sector = blk_rq_pos(src);
2733 dst->__data_len = blk_rq_bytes(src);
2734 dst->nr_phys_segments = src->nr_phys_segments;
2735 dst->ioprio = src->ioprio;
2736 dst->extra_len = src->extra_len;
2737}
2738
2739/**
2740 * blk_rq_prep_clone - Helper function to setup clone request
2741 * @rq: the request to be setup
2742 * @rq_src: original request to be cloned
2743 * @bs: bio_set that bios for clone are allocated from
2744 * @gfp_mask: memory allocation mask for bio
2745 * @bio_ctr: setup function to be called for each clone bio.
2746 * Returns %0 for success, non %0 for failure.
2747 * @data: private data to be passed to @bio_ctr
2748 *
2749 * Description:
2750 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
2751 * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
2752 * are not copied, and copying such parts is the caller's responsibility.
2753 *     Also, pages which the original bios are pointing to are not copied;
2754 *     the cloned bios just point to the same pages.
2755 * So cloned bios must be completed before original bios, which means
2756 * the caller must complete @rq before @rq_src.
2757 */
2758int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
2759 struct bio_set *bs, gfp_t gfp_mask,
2760 int (*bio_ctr)(struct bio *, struct bio *, void *),
2761 void *data)
2762{
2763 struct bio *bio, *bio_src;
2764
2765 if (!bs)
2766 bs = fs_bio_set;
2767
2768 blk_rq_init(NULL, rq);
2769
2770 __rq_for_each_bio(bio_src, rq_src) {
2771 bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
2772 if (!bio)
2773 goto free_and_out;
2774
2775 __bio_clone(bio, bio_src);
2776
2777 if (bio_integrity(bio_src) &&
2778 bio_integrity_clone(bio, bio_src, gfp_mask, bs))
2779 goto free_and_out;
2780
2781 if (bio_ctr && bio_ctr(bio, bio_src, data))
2782 goto free_and_out;
2783
2784 if (rq->bio) {
2785 rq->biotail->bi_next = bio;
2786 rq->biotail = bio;
2787 } else
2788 rq->bio = rq->biotail = bio;
2789 }
2790
2791 __blk_rq_prep_clone(rq, rq_src);
2792
2793 return 0;
2794
2795free_and_out:
2796 if (bio)
2797 bio_free(bio, bs);
2798 blk_rq_unprep_clone(rq);
2799
2800 return -ENOMEM;
2801}
2802EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
2803
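/*
 * Editor's illustrative sketch (not part of the original file): the clone and
 * dispatch sequence a request-based stacking driver might use, pairing
 * blk_rq_prep_clone() with blk_insert_cloned_request() as described above.
 * The clone allocation and the choice of the lower queue are assumed to be
 * handled by the caller.
 */
static int example_clone_and_dispatch(struct request *clone,
				      struct request *orig,
				      struct request_queue *lower_q)
{
	int ret;

	/* copy attributes and clone the bios; default bio_set, no per-bio hook */
	ret = blk_rq_prep_clone(clone, orig, NULL, GFP_ATOMIC, NULL, NULL);
	if (ret)
		return ret;

	/* hand the clone to the lower device's queue */
	ret = blk_insert_cloned_request(lower_q, clone);
	if (ret)
		blk_rq_unprep_clone(clone);

	return ret;
}
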
2804int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
2805{
2806 return queue_work(kblockd_workqueue, work);
2807}
2808EXPORT_SYMBOL(kblockd_schedule_work);
2809
2810int kblockd_schedule_delayed_work(struct request_queue *q,
2811 struct delayed_work *dwork, unsigned long delay)
2812{
2813 return queue_delayed_work(kblockd_workqueue, dwork, delay);
2814}
2815EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2816
2817#define PLUG_MAGIC 0x91827364
2818
2819/**
2820 * blk_start_plug - initialize blk_plug and track it inside the task_struct
2821 * @plug: The &struct blk_plug that needs to be initialized
2822 *
2823 * Description:
2824 * Tracking blk_plug inside the task_struct will help with auto-flushing the
2825 * pending I/O should the task end up blocking between blk_start_plug() and
2826 * blk_finish_plug(). This is important from a performance perspective, but
2827 * also ensures that we don't deadlock. For instance, if the task is blocking
2828 * for a memory allocation, memory reclaim could end up wanting to free a
2829 * page belonging to that request that is currently residing in our private
2830 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
2831 * this kind of deadlock.
2832 */
2833void blk_start_plug(struct blk_plug *plug)
2834{
2835 struct task_struct *tsk = current;
2836
2837 plug->magic = PLUG_MAGIC;
2838 INIT_LIST_HEAD(&plug->list);
2839 INIT_LIST_HEAD(&plug->cb_list);
2840 plug->should_sort = 0;
2841
2842 /*
2843 * If this is a nested plug, don't actually assign it. It will be
2844 * flushed on its own.
2845 */
2846 if (!tsk->plug) {
2847 /*
2848 * Store ordering should not be needed here, since a potential
2849 * preempt will imply a full memory barrier
2850 */
2851 tsk->plug = plug;
2852 }
2853}
2854EXPORT_SYMBOL(blk_start_plug);
2855
2856static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
2857{
2858 struct request *rqa = container_of(a, struct request, queuelist);
2859 struct request *rqb = container_of(b, struct request, queuelist);
2860
2861 return !(rqa->q <= rqb->q);
2862}
2863
2864/*
2865 * If 'from_schedule' is true, then postpone the dispatch of requests
2866 * until a safe kblockd context. We do this to avoid accidental large
2867 * additional stack usage in driver dispatch, in places where the original
2868 * plugger did not intend it.
2869 */
2870static void queue_unplugged(struct request_queue *q, unsigned int depth,
2871 bool from_schedule)
2872 __releases(q->queue_lock)
2873{
2874 trace_block_unplug(q, depth, !from_schedule);
2875
2876 /*
2877 * Don't mess with dead queue.
2878 */
2879 if (unlikely(blk_queue_dead(q))) {
2880 spin_unlock(q->queue_lock);
2881 return;
2882 }
2883
2884 /*
2885 * If we are punting this to kblockd, then we can safely drop
2886 * the queue_lock before waking kblockd (which needs to take
2887 * this lock).
2888 */
2889 if (from_schedule) {
2890 spin_unlock(q->queue_lock);
2891 blk_run_queue_async(q);
2892 } else {
2893 __blk_run_queue(q);
2894 spin_unlock(q->queue_lock);
2895 }
2896
2897}
2898
2899static void flush_plug_callbacks(struct blk_plug *plug)
2900{
2901 LIST_HEAD(callbacks);
2902
2903 if (list_empty(&plug->cb_list))
2904 return;
2905
2906 list_splice_init(&plug->cb_list, &callbacks);
2907
2908 while (!list_empty(&callbacks)) {
2909 struct blk_plug_cb *cb = list_first_entry(&callbacks,
2910 struct blk_plug_cb,
2911 list);
2912 list_del(&cb->list);
2913 cb->callback(cb);
2914 }
2915}
2916
2917void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
2918{
2919 struct request_queue *q;
2920 unsigned long flags;
2921 struct request *rq;
2922 LIST_HEAD(list);
2923 unsigned int depth;
2924
2925 BUG_ON(plug->magic != PLUG_MAGIC);
2926
2927 flush_plug_callbacks(plug);
2928 if (list_empty(&plug->list))
2929 return;
2930
2931 list_splice_init(&plug->list, &list);
2932
2933 if (plug->should_sort) {
2934 list_sort(NULL, &list, plug_rq_cmp);
2935 plug->should_sort = 0;
2936 }
2937
2938 q = NULL;
2939 depth = 0;
2940
2941 /*
2942 * Save and disable interrupts here, to avoid doing it for every
2943 * queue lock we have to take.
2944 */
2945 local_irq_save(flags);
2946 while (!list_empty(&list)) {
2947 rq = list_entry_rq(list.next);
2948 list_del_init(&rq->queuelist);
2949 BUG_ON(!rq->q);
2950 if (rq->q != q) {
2951 /*
2952 * This drops the queue lock
2953 */
2954 if (q)
2955 queue_unplugged(q, depth, from_schedule);
2956 q = rq->q;
2957 depth = 0;
2958 spin_lock(q->queue_lock);
2959 }
2960
2961 /*
2962 * Short-circuit if @q is dead
2963 */
2964 if (unlikely(blk_queue_dead(q))) {
2965 __blk_end_request_all(rq, -ENODEV);
2966 continue;
2967 }
2968
2969 /*
2970 * rq is already accounted, so use raw insert
2971 */
2972 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA))
2973 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
2974 else
2975 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
2976
2977 depth++;
2978 }
2979
2980 /*
2981 * This drops the queue lock
2982 */
2983 if (q)
2984 queue_unplugged(q, depth, from_schedule);
2985
2986 local_irq_restore(flags);
2987}
2988
2989void blk_finish_plug(struct blk_plug *plug)
2990{
2991 blk_flush_plug_list(plug, false);
2992
2993 if (plug == current->plug)
2994 current->plug = NULL;
2995}
2996EXPORT_SYMBOL(blk_finish_plug);
2997
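/*
 * Editor's illustrative sketch (not part of the original file): batching a
 * series of submissions inside a plug so they are queued together and
 * flushed once, either here at blk_finish_plug() or automatically if the
 * task sleeps in between.  The bio array and its length are hypothetical.
 */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);

	for (i = 0; i < nr; i++)
		submit_bio(WRITE, bios[i]);

	blk_finish_plug(&plug);		/* flushes the plugged requests */
}
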
2998int __init blk_dev_init(void)
2999{
3000 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
3001 sizeof(((struct request *)0)->cmd_flags));
3002
3003 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3004 kblockd_workqueue = alloc_workqueue("kblockd",
3005 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
3006 if (!kblockd_workqueue)
3007 panic("Failed to create kblockd\n");
3008
3009 request_cachep = kmem_cache_create("blkdev_requests",
3010 sizeof(struct request), 0, SLAB_PANIC, NULL);
3011
3012 blk_requestq_cachep = kmem_cache_create("blkdev_queue",
3013 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3014
3015 return 0;
3016}
1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7 * - July2000
8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
9 */
10
11/*
12 * This handles all read/write requests to block devices
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/backing-dev.h>
17#include <linux/bio.h>
18#include <linux/blkdev.h>
19#include <linux/blk-mq.h>
20#include <linux/highmem.h>
21#include <linux/mm.h>
22#include <linux/kernel_stat.h>
23#include <linux/string.h>
24#include <linux/init.h>
25#include <linux/completion.h>
26#include <linux/slab.h>
27#include <linux/swap.h>
28#include <linux/writeback.h>
29#include <linux/task_io_accounting_ops.h>
30#include <linux/fault-inject.h>
31#include <linux/list_sort.h>
32#include <linux/delay.h>
33#include <linux/ratelimit.h>
34#include <linux/pm_runtime.h>
35#include <linux/blk-cgroup.h>
36
37#define CREATE_TRACE_POINTS
38#include <trace/events/block.h>
39
40#include "blk.h"
41#include "blk-mq.h"
42#include "blk-wbt.h"
43
44EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
45EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
46EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
47EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
48EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
49
50DEFINE_IDA(blk_queue_ida);
51
52/*
53 * For the allocated request tables
54 */
55struct kmem_cache *request_cachep;
56
57/*
58 * For queue allocation
59 */
60struct kmem_cache *blk_requestq_cachep;
61
62/*
63 * Controlling structure to kblockd
64 */
65static struct workqueue_struct *kblockd_workqueue;
66
67static void blk_clear_congested(struct request_list *rl, int sync)
68{
69#ifdef CONFIG_CGROUP_WRITEBACK
70 clear_wb_congested(rl->blkg->wb_congested, sync);
71#else
72 /*
73 * If !CGROUP_WRITEBACK, all blkg's map to bdi->wb and we shouldn't
74 * flip its congestion state for events on other blkcgs.
75 */
76 if (rl == &rl->q->root_rl)
77 clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
78#endif
79}
80
81static void blk_set_congested(struct request_list *rl, int sync)
82{
83#ifdef CONFIG_CGROUP_WRITEBACK
84 set_wb_congested(rl->blkg->wb_congested, sync);
85#else
86 /* see blk_clear_congested() */
87 if (rl == &rl->q->root_rl)
88 set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
89#endif
90}
91
92void blk_queue_congestion_threshold(struct request_queue *q)
93{
94 int nr;
95
96 nr = q->nr_requests - (q->nr_requests / 8) + 1;
97 if (nr > q->nr_requests)
98 nr = q->nr_requests;
99 q->nr_congestion_on = nr;
100
101 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
102 if (nr < 1)
103 nr = 1;
104 q->nr_congestion_off = nr;
105}
106
107/**
108 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
109 * @bdev: device
110 *
111 * Locates the passed device's request queue and returns the address of its
112 * backing_dev_info. This function can only be called if @bdev is opened
113 * and the return value is never NULL.
114 */
115struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
116{
117 struct request_queue *q = bdev_get_queue(bdev);
118
119 return &q->backing_dev_info;
120}
121EXPORT_SYMBOL(blk_get_backing_dev_info);
122
123void blk_rq_init(struct request_queue *q, struct request *rq)
124{
125 memset(rq, 0, sizeof(*rq));
126
127 INIT_LIST_HEAD(&rq->queuelist);
128 INIT_LIST_HEAD(&rq->timeout_list);
129 rq->cpu = -1;
130 rq->q = q;
131 rq->__sector = (sector_t) -1;
132 INIT_HLIST_NODE(&rq->hash);
133 RB_CLEAR_NODE(&rq->rb_node);
134 rq->cmd = rq->__cmd;
135 rq->cmd_len = BLK_MAX_CDB;
136 rq->tag = -1;
137 rq->start_time = jiffies;
138 set_start_time_ns(rq);
139 rq->part = NULL;
140}
141EXPORT_SYMBOL(blk_rq_init);
142
143static void req_bio_endio(struct request *rq, struct bio *bio,
144 unsigned int nbytes, int error)
145{
146 if (error)
147 bio->bi_error = error;
148
149 if (unlikely(rq->rq_flags & RQF_QUIET))
150 bio_set_flag(bio, BIO_QUIET);
151
152 bio_advance(bio, nbytes);
153
154 /* don't actually finish bio if it's part of flush sequence */
155 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
156 bio_endio(bio);
157}
158
159void blk_dump_rq_flags(struct request *rq, char *msg)
160{
161 int bit;
162
163 printk(KERN_INFO "%s: dev %s: type=%x, flags=%llx\n", msg,
164 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
165 (unsigned long long) rq->cmd_flags);
166
167 printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
168 (unsigned long long)blk_rq_pos(rq),
169 blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
170 printk(KERN_INFO " bio %p, biotail %p, len %u\n",
171 rq->bio, rq->biotail, blk_rq_bytes(rq));
172
173 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
174 printk(KERN_INFO " cdb: ");
175 for (bit = 0; bit < BLK_MAX_CDB; bit++)
176 printk("%02x ", rq->cmd[bit]);
177 printk("\n");
178 }
179}
180EXPORT_SYMBOL(blk_dump_rq_flags);
181
182static void blk_delay_work(struct work_struct *work)
183{
184 struct request_queue *q;
185
186 q = container_of(work, struct request_queue, delay_work.work);
187 spin_lock_irq(q->queue_lock);
188 __blk_run_queue(q);
189 spin_unlock_irq(q->queue_lock);
190}
191
192/**
193 * blk_delay_queue - restart queueing after defined interval
194 * @q: The &struct request_queue in question
195 * @msecs: Delay in msecs
196 *
197 * Description:
198 * Sometimes queueing needs to be postponed for a little while, to allow
199 * resources to come back. This function will make sure that queueing is
200 * restarted around the specified time. Queue lock must be held.
201 */
202void blk_delay_queue(struct request_queue *q, unsigned long msecs)
203{
204 if (likely(!blk_queue_dead(q)))
205 queue_delayed_work(kblockd_workqueue, &q->delay_work,
206 msecs_to_jiffies(msecs));
207}
208EXPORT_SYMBOL(blk_delay_queue);
209
210/**
211 * blk_start_queue_async - asynchronously restart a previously stopped queue
212 * @q: The &struct request_queue in question
213 *
214 * Description:
215 * blk_start_queue_async() will clear the stop flag on the queue, and
216 * ensure that the request_fn for the queue is run from an async
217 * context.
218 **/
219void blk_start_queue_async(struct request_queue *q)
220{
221 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
222 blk_run_queue_async(q);
223}
224EXPORT_SYMBOL(blk_start_queue_async);
225
226/**
227 * blk_start_queue - restart a previously stopped queue
228 * @q: The &struct request_queue in question
229 *
230 * Description:
231 * blk_start_queue() will clear the stop flag on the queue, and call
232 * the request_fn for the queue if it was in a stopped state when
233 * entered. Also see blk_stop_queue(). Queue lock must be held.
234 **/
235void blk_start_queue(struct request_queue *q)
236{
237 WARN_ON(!irqs_disabled());
238
239 queue_flag_clear(QUEUE_FLAG_STOPPED, q);
240 __blk_run_queue(q);
241}
242EXPORT_SYMBOL(blk_start_queue);
243
244/**
245 * blk_stop_queue - stop a queue
246 * @q: The &struct request_queue in question
247 *
248 * Description:
249 * The Linux block layer assumes that a block driver will consume all
250 * entries on the request queue when the request_fn strategy is called.
251 * Often this will not happen, because of hardware limitations (queue
252 * depth settings). If a device driver gets a 'queue full' response,
253 * or if it simply chooses not to queue more I/O at one point, it can
254 * call this function to prevent the request_fn from being called until
255 * the driver has signalled it's ready to go again. This happens by calling
256 * blk_start_queue() to restart queue operations. Queue lock must be held.
257 **/
258void blk_stop_queue(struct request_queue *q)
259{
260 cancel_delayed_work(&q->delay_work);
261 queue_flag_set(QUEUE_FLAG_STOPPED, q);
262}
263EXPORT_SYMBOL(blk_stop_queue);
264
265/**
266 * blk_sync_queue - cancel any pending callbacks on a queue
267 * @q: the queue
268 *
269 * Description:
270 * The block layer may perform asynchronous callback activity
271 * on a queue, such as calling the unplug function after a timeout.
272 * A block device may call blk_sync_queue to ensure that any
273 * such activity is cancelled, thus allowing it to release resources
274 * that the callbacks might use. The caller must already have made sure
275 * that its ->make_request_fn will not re-add plugging prior to calling
276 * this function.
277 *
278 * This function does not cancel any asynchronous activity arising
279 * out of elevator or throttling code. That would require elevator_exit()
280 * and blkcg_exit_queue() to be called with queue lock initialized.
281 *
282 */
283void blk_sync_queue(struct request_queue *q)
284{
285 del_timer_sync(&q->timeout);
286
287 if (q->mq_ops) {
288 struct blk_mq_hw_ctx *hctx;
289 int i;
290
291 queue_for_each_hw_ctx(q, hctx, i) {
292 cancel_work_sync(&hctx->run_work);
293 cancel_delayed_work_sync(&hctx->delay_work);
294 }
295 } else {
296 cancel_delayed_work_sync(&q->delay_work);
297 }
298}
299EXPORT_SYMBOL(blk_sync_queue);
300
301/**
302 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
303 * @q: The queue to run
304 *
305 * Description:
306 * Invoke request handling on a queue if there are any pending requests.
307 * May be used to restart request handling after a request has completed.
308 * This variant runs the queue whether or not the queue has been
309 * stopped. Must be called with the queue lock held and interrupts
310 * disabled. See also @blk_run_queue.
311 */
312inline void __blk_run_queue_uncond(struct request_queue *q)
313{
314 if (unlikely(blk_queue_dead(q)))
315 return;
316
317 /*
318 * Some request_fn implementations, e.g. scsi_request_fn(), unlock
319 * the queue lock internally. As a result multiple threads may be
320 * running such a request function concurrently. Keep track of the
321 * number of active request_fn invocations such that blk_drain_queue()
322 * can wait until all these request_fn calls have finished.
323 */
324 q->request_fn_active++;
325 q->request_fn(q);
326 q->request_fn_active--;
327}
328EXPORT_SYMBOL_GPL(__blk_run_queue_uncond);
329
330/**
331 * __blk_run_queue - run a single device queue
332 * @q: The queue to run
333 *
334 * Description:
335 * See @blk_run_queue. This variant must be called with the queue lock
336 * held and interrupts disabled.
337 */
338void __blk_run_queue(struct request_queue *q)
339{
340 if (unlikely(blk_queue_stopped(q)))
341 return;
342
343 __blk_run_queue_uncond(q);
344}
345EXPORT_SYMBOL(__blk_run_queue);
346
347/**
348 * blk_run_queue_async - run a single device queue in workqueue context
349 * @q: The queue to run
350 *
351 * Description:
352 * Tells kblockd to perform the equivalent of @blk_run_queue on behalf
353 * of us. The caller must hold the queue lock.
354 */
355void blk_run_queue_async(struct request_queue *q)
356{
357 if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
358 mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
359}
360EXPORT_SYMBOL(blk_run_queue_async);
361
362/**
363 * blk_run_queue - run a single device queue
364 * @q: The queue to run
365 *
366 * Description:
367 * Invoke request handling on this queue, if it has pending work to do.
368 * May be used to restart queueing when a request has completed.
369 */
370void blk_run_queue(struct request_queue *q)
371{
372 unsigned long flags;
373
374 spin_lock_irqsave(q->queue_lock, flags);
375 __blk_run_queue(q);
376 spin_unlock_irqrestore(q->queue_lock, flags);
377}
378EXPORT_SYMBOL(blk_run_queue);
379
380void blk_put_queue(struct request_queue *q)
381{
382 kobject_put(&q->kobj);
383}
384EXPORT_SYMBOL(blk_put_queue);
385
386/**
387 * __blk_drain_queue - drain requests from request_queue
388 * @q: queue to drain
389 * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
390 *
391 * Drain requests from @q. If @drain_all is set, all requests are drained.
392 * If not, only ELVPRIV requests are drained. The caller is responsible
393 * for ensuring that no new requests which need to be drained are queued.
394 */
395static void __blk_drain_queue(struct request_queue *q, bool drain_all)
396 __releases(q->queue_lock)
397 __acquires(q->queue_lock)
398{
399 int i;
400
401 lockdep_assert_held(q->queue_lock);
402
403 while (true) {
404 bool drain = false;
405
406 /*
407 * The caller might be trying to drain @q before its
408 * elevator is initialized.
409 */
410 if (q->elevator)
411 elv_drain_elevator(q);
412
413 blkcg_drain_queue(q);
414
415 /*
416 * This function might be called on a queue which failed
417 * driver init after queue creation or is not yet fully
418 * active yet. Some drivers (e.g. fd and loop) get unhappy
419 * in such cases. Kick queue iff dispatch queue has
420 * something on it and @q has request_fn set.
421 */
422 if (!list_empty(&q->queue_head) && q->request_fn)
423 __blk_run_queue(q);
424
425 drain |= q->nr_rqs_elvpriv;
426 drain |= q->request_fn_active;
427
428 /*
429 * Unfortunately, requests are queued at and tracked from
430 * multiple places and there's no single counter which can
431 * be drained. Check all the queues and counters.
432 */
433 if (drain_all) {
434 struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
435 drain |= !list_empty(&q->queue_head);
436 for (i = 0; i < 2; i++) {
437 drain |= q->nr_rqs[i];
438 drain |= q->in_flight[i];
439 if (fq)
440 drain |= !list_empty(&fq->flush_queue[i]);
441 }
442 }
443
444 if (!drain)
445 break;
446
447 spin_unlock_irq(q->queue_lock);
448
449 msleep(10);
450
451 spin_lock_irq(q->queue_lock);
452 }
453
454 /*
455 * With queue marked dead, any woken up waiter will fail the
456 * allocation path, so the wakeup chaining is lost and we're
457 * left with hung waiters. We need to wake up those waiters.
458 */
459 if (q->request_fn) {
460 struct request_list *rl;
461
462 blk_queue_for_each_rl(rl, q)
463 for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
464 wake_up_all(&rl->wait[i]);
465 }
466}
467
468/**
469 * blk_queue_bypass_start - enter queue bypass mode
470 * @q: queue of interest
471 *
472 * In bypass mode, only the dispatch FIFO queue of @q is used. This
473 * function makes @q enter bypass mode and drains all requests which were
474 * throttled or issued before. On return, it's guaranteed that no request
475 * is being throttled or has ELVPRIV set and blk_queue_bypass() %true
476 * inside queue or RCU read lock.
477 */
478void blk_queue_bypass_start(struct request_queue *q)
479{
480 spin_lock_irq(q->queue_lock);
481 q->bypass_depth++;
482 queue_flag_set(QUEUE_FLAG_BYPASS, q);
483 spin_unlock_irq(q->queue_lock);
484
485 /*
486 * Queues start drained. Skip actual draining till init is
487 * complete. This avoids lenghty delays during queue init which
488 * can happen many times during boot.
489 */
490 if (blk_queue_init_done(q)) {
491 spin_lock_irq(q->queue_lock);
492 __blk_drain_queue(q, false);
493 spin_unlock_irq(q->queue_lock);
494
495 /* ensure blk_queue_bypass() is %true inside RCU read lock */
496 synchronize_rcu();
497 }
498}
499EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
500
501/**
502 * blk_queue_bypass_end - leave queue bypass mode
503 * @q: queue of interest
504 *
505 * Leave bypass mode and restore the normal queueing behavior.
506 */
507void blk_queue_bypass_end(struct request_queue *q)
508{
509 spin_lock_irq(q->queue_lock);
510 if (!--q->bypass_depth)
511 queue_flag_clear(QUEUE_FLAG_BYPASS, q);
512 WARN_ON_ONCE(q->bypass_depth < 0);
513 spin_unlock_irq(q->queue_lock);
514}
515EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
516
517void blk_set_queue_dying(struct request_queue *q)
518{
519 spin_lock_irq(q->queue_lock);
520 queue_flag_set(QUEUE_FLAG_DYING, q);
521 spin_unlock_irq(q->queue_lock);
522
523 if (q->mq_ops)
524 blk_mq_wake_waiters(q);
525 else {
526 struct request_list *rl;
527
528 blk_queue_for_each_rl(rl, q) {
529 if (rl->rq_pool) {
530 wake_up(&rl->wait[BLK_RW_SYNC]);
531 wake_up(&rl->wait[BLK_RW_ASYNC]);
532 }
533 }
534 }
535}
536EXPORT_SYMBOL_GPL(blk_set_queue_dying);
537
538/**
539 * blk_cleanup_queue - shutdown a request queue
540 * @q: request queue to shutdown
541 *
542 * Mark @q DYING, drain all pending requests, mark @q DEAD, destroy and
543 * put it. All future requests will be failed immediately with -ENODEV.
544 */
545void blk_cleanup_queue(struct request_queue *q)
546{
547 spinlock_t *lock = q->queue_lock;
548
549 /* mark @q DYING, no new request or merges will be allowed afterwards */
550 mutex_lock(&q->sysfs_lock);
551 blk_set_queue_dying(q);
552 spin_lock_irq(lock);
553
554 /*
555 * A dying queue is permanently in bypass mode till released. Note
556 * that, unlike blk_queue_bypass_start(), we aren't performing
557 * synchronize_rcu() after entering bypass mode to avoid the delay
558 * as some drivers create and destroy a lot of queues while
559 * probing. This is still safe because blk_release_queue() will be
560 * called only after the queue refcnt drops to zero and nothing,
561 * RCU or not, would be traversing the queue by then.
562 */
563 q->bypass_depth++;
564 queue_flag_set(QUEUE_FLAG_BYPASS, q);
565
566 queue_flag_set(QUEUE_FLAG_NOMERGES, q);
567 queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
568 queue_flag_set(QUEUE_FLAG_DYING, q);
569 spin_unlock_irq(lock);
570 mutex_unlock(&q->sysfs_lock);
571
572 /*
573 * Drain all requests queued before DYING marking. Set DEAD flag to
574 * prevent that q->request_fn() gets invoked after draining finished.
575 */
576 blk_freeze_queue(q);
577 spin_lock_irq(lock);
578 if (!q->mq_ops)
579 __blk_drain_queue(q, true);
580 queue_flag_set(QUEUE_FLAG_DEAD, q);
581 spin_unlock_irq(lock);
582
583 /* for synchronous bio-based driver finish in-flight integrity i/o */
584 blk_flush_integrity();
585
586 /* @q won't process any more request, flush async actions */
587 del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
588 blk_sync_queue(q);
589
590 if (q->mq_ops)
591 blk_mq_free_queue(q);
592 percpu_ref_exit(&q->q_usage_counter);
593
594 spin_lock_irq(lock);
595 if (q->queue_lock != &q->__queue_lock)
596 q->queue_lock = &q->__queue_lock;
597 spin_unlock_irq(lock);
598
599 bdi_unregister(&q->backing_dev_info);
600
601 /* @q is and will stay empty, shutdown and put */
602 blk_put_queue(q);
603}
604EXPORT_SYMBOL(blk_cleanup_queue);
605
606/* Allocate memory local to the request queue */
607static void *alloc_request_struct(gfp_t gfp_mask, void *data)
608{
609 int nid = (int)(long)data;
610 return kmem_cache_alloc_node(request_cachep, gfp_mask, nid);
611}
612
613static void free_request_struct(void *element, void *unused)
614{
615 kmem_cache_free(request_cachep, element);
616}
617
618int blk_init_rl(struct request_list *rl, struct request_queue *q,
619 gfp_t gfp_mask)
620{
621 if (unlikely(rl->rq_pool))
622 return 0;
623
624 rl->q = q;
625 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
626 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
627 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
628 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
629
630 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, alloc_request_struct,
631 free_request_struct,
632 (void *)(long)q->node, gfp_mask,
633 q->node);
634 if (!rl->rq_pool)
635 return -ENOMEM;
636
637 return 0;
638}
639
640void blk_exit_rl(struct request_list *rl)
641{
642 if (rl->rq_pool)
643 mempool_destroy(rl->rq_pool);
644}
645
646struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
647{
648 return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
649}
650EXPORT_SYMBOL(blk_alloc_queue);
651
652int blk_queue_enter(struct request_queue *q, bool nowait)
653{
654 while (true) {
655 int ret;
656
657 if (percpu_ref_tryget_live(&q->q_usage_counter))
658 return 0;
659
660 if (nowait)
661 return -EBUSY;
662
663 ret = wait_event_interruptible(q->mq_freeze_wq,
664 !atomic_read(&q->mq_freeze_depth) ||
665 blk_queue_dying(q));
666 if (blk_queue_dying(q))
667 return -ENODEV;
668 if (ret)
669 return ret;
670 }
671}
672
673void blk_queue_exit(struct request_queue *q)
674{
675 percpu_ref_put(&q->q_usage_counter);
676}
677
678static void blk_queue_usage_counter_release(struct percpu_ref *ref)
679{
680 struct request_queue *q =
681 container_of(ref, struct request_queue, q_usage_counter);
682
683 wake_up_all(&q->mq_freeze_wq);
684}
685
686static void blk_rq_timed_out_timer(unsigned long data)
687{
688 struct request_queue *q = (struct request_queue *)data;
689
690 kblockd_schedule_work(&q->timeout_work);
691}
692
693struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
694{
695 struct request_queue *q;
696 int err;
697
698 q = kmem_cache_alloc_node(blk_requestq_cachep,
699 gfp_mask | __GFP_ZERO, node_id);
700 if (!q)
701 return NULL;
702
703 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
704 if (q->id < 0)
705 goto fail_q;
706
707 q->bio_split = bioset_create(BIO_POOL_SIZE, 0);
708 if (!q->bio_split)
709 goto fail_id;
710
711 q->backing_dev_info.ra_pages =
712 (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
713 q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
714 q->backing_dev_info.name = "block";
715 q->node = node_id;
716
717 err = bdi_init(&q->backing_dev_info);
718 if (err)
719 goto fail_split;
720
721 setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
722 laptop_mode_timer_fn, (unsigned long) q);
723 setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
724 INIT_LIST_HEAD(&q->queue_head);
725 INIT_LIST_HEAD(&q->timeout_list);
726 INIT_LIST_HEAD(&q->icq_list);
727#ifdef CONFIG_BLK_CGROUP
728 INIT_LIST_HEAD(&q->blkg_list);
729#endif
730 INIT_DELAYED_WORK(&q->delay_work, blk_delay_work);
731
732 kobject_init(&q->kobj, &blk_queue_ktype);
733
734 mutex_init(&q->sysfs_lock);
735 spin_lock_init(&q->__queue_lock);
736
737 /*
738 * By default initialize queue_lock to internal lock and driver can
739 * override it later if need be.
740 */
741 q->queue_lock = &q->__queue_lock;
742
743 /*
744 * A queue starts its life with bypass turned on to avoid
745 * unnecessary bypass on/off overhead and nasty surprises during
746 * init. The initial bypass will be finished when the queue is
747 * registered by blk_register_queue().
748 */
749 q->bypass_depth = 1;
750 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
751
752 init_waitqueue_head(&q->mq_freeze_wq);
753
754 /*
755 * Init percpu_ref in atomic mode so that it's faster to shutdown.
756 * See blk_register_queue() for details.
757 */
758 if (percpu_ref_init(&q->q_usage_counter,
759 blk_queue_usage_counter_release,
760 PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
761 goto fail_bdi;
762
763 if (blkcg_init_queue(q))
764 goto fail_ref;
765
766 return q;
767
768fail_ref:
769 percpu_ref_exit(&q->q_usage_counter);
770fail_bdi:
771 bdi_destroy(&q->backing_dev_info);
772fail_split:
773 bioset_free(q->bio_split);
774fail_id:
775 ida_simple_remove(&blk_queue_ida, q->id);
776fail_q:
777 kmem_cache_free(blk_requestq_cachep, q);
778 return NULL;
779}
780EXPORT_SYMBOL(blk_alloc_queue_node);
781
782/**
783 * blk_init_queue - prepare a request queue for use with a block device
784 * @rfn: The function to be called to process requests that have been
785 * placed on the queue.
786 * @lock: Request queue spin lock
787 *
788 * Description:
789 * If a block device wishes to use the standard request handling procedures,
790 * which sorts requests and coalesces adjacent requests, then it must
791 * call blk_init_queue(). The function @rfn will be called when there
792 * are requests on the queue that need to be processed. If the device
793 * supports plugging, then @rfn may not be called immediately when requests
794 * are available on the queue, but may be called at some time later instead.
795 * Plugged queues are generally unplugged when a buffer belonging to one
796 * of the requests on the queue is needed, or due to memory pressure.
797 *
798 * @rfn is not required, or even expected, to remove all requests off the
799 * queue, but only as many as it can handle at a time. If it does leave
800 * requests on the queue, it is responsible for arranging that the requests
801 * get dealt with eventually.
802 *
803 * The queue spin lock must be held while manipulating the requests on the
804 * request queue; this lock will be taken also from interrupt context, so irq
805 * disabling is needed for it.
806 *
807 * Function returns a pointer to the initialized request queue, or %NULL if
808 * it didn't succeed.
809 *
810 * Note:
811 * blk_init_queue() must be paired with a blk_cleanup_queue() call
812 * when the block device is deactivated (such as at module unload).
813 **/
814
815struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
816{
817 return blk_init_queue_node(rfn, lock, NUMA_NO_NODE);
818}
819EXPORT_SYMBOL(blk_init_queue);
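
/*
 * Sketch for request_fn-based drivers (illustration only, my_request_fn
 * and my_lock are made-up names): blk_init_queue() at probe time is
 * paired with blk_cleanup_queue() at teardown, as the kernel-doc above
 * requires.
 *
 *	static void my_request_fn(struct request_queue *q);
 *	static DEFINE_SPINLOCK(my_lock);
 *
 *	q = blk_init_queue(my_request_fn, &my_lock);
 *	if (!q)
 *		return -ENOMEM;
 *	...
 *	blk_cleanup_queue(q);		// at module unload / device removal
 */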
820
821struct request_queue *
822blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
823{
824 struct request_queue *uninit_q, *q;
825
826 uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id);
827 if (!uninit_q)
828 return NULL;
829
830 q = blk_init_allocated_queue(uninit_q, rfn, lock);
831 if (!q)
832 blk_cleanup_queue(uninit_q);
833
834 return q;
835}
836EXPORT_SYMBOL(blk_init_queue_node);
837
838static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
839
840struct request_queue *
841blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
842 spinlock_t *lock)
843{
844 if (!q)
845 return NULL;
846
847 q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, 0);
848 if (!q->fq)
849 return NULL;
850
851 if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
852 goto fail;
853
854 INIT_WORK(&q->timeout_work, blk_timeout_work);
855 q->request_fn = rfn;
856 q->prep_rq_fn = NULL;
857 q->unprep_rq_fn = NULL;
858 q->queue_flags |= QUEUE_FLAG_DEFAULT;
859
860 /* Override internal queue lock with supplied lock pointer */
861 if (lock)
862 q->queue_lock = lock;
863
864 /*
865 * This also sets hw/phys segments, boundary and size
866 */
867 blk_queue_make_request(q, blk_queue_bio);
868
869 q->sg_reserved_size = INT_MAX;
870
871 /* Protect q->elevator from elevator_change */
872 mutex_lock(&q->sysfs_lock);
873
874 /* init elevator */
875 if (elevator_init(q, NULL)) {
876 mutex_unlock(&q->sysfs_lock);
877 goto fail;
878 }
879
880 mutex_unlock(&q->sysfs_lock);
881
882 return q;
883
884fail:
885 blk_free_flush_queue(q->fq);
886 wbt_exit(q);
887 return NULL;
888}
889EXPORT_SYMBOL(blk_init_allocated_queue);
890
891bool blk_get_queue(struct request_queue *q)
892{
893 if (likely(!blk_queue_dying(q))) {
894 __blk_get_queue(q);
895 return true;
896 }
897
898 return false;
899}
900EXPORT_SYMBOL(blk_get_queue);
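
/*
 * Reference sketch (illustration only): callers that want to hold on to a
 * queue pair blk_get_queue() with blk_put_queue(), bailing out if the
 * queue is already dying.
 *
 *	if (!blk_get_queue(q))
 *		return -ENXIO;
 *	... use q ...
 *	blk_put_queue(q);
 */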
901
902static inline void blk_free_request(struct request_list *rl, struct request *rq)
903{
904 if (rq->rq_flags & RQF_ELVPRIV) {
905 elv_put_request(rl->q, rq);
906 if (rq->elv.icq)
907 put_io_context(rq->elv.icq->ioc);
908 }
909
910 mempool_free(rq, rl->rq_pool);
911}
912
913/*
914 * ioc_batching returns true if the ioc is a valid batching context and
915 * should be given priority access to a request.
916 */
917static inline int ioc_batching(struct request_queue *q, struct io_context *ioc)
918{
919 if (!ioc)
920 return 0;
921
922 /*
923 * Make sure the process is able to allocate at least 1 request
924 * even if the batch times out, otherwise we could theoretically
925 * lose wakeups.
926 */
927 return ioc->nr_batch_requests == q->nr_batching ||
928 (ioc->nr_batch_requests > 0
929 && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
930}
931
932/*
933 * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
934 * will cause the process to be a "batcher" on all queues in the system. This
935 * is the behaviour we want though - once it gets a wakeup it should be given
936 * a nice run.
937 */
938static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
939{
940 if (!ioc || ioc_batching(q, ioc))
941 return;
942
943 ioc->nr_batch_requests = q->nr_batching;
944 ioc->last_waited = jiffies;
945}
946
947static void __freed_request(struct request_list *rl, int sync)
948{
949 struct request_queue *q = rl->q;
950
951 if (rl->count[sync] < queue_congestion_off_threshold(q))
952 blk_clear_congested(rl, sync);
953
954 if (rl->count[sync] + 1 <= q->nr_requests) {
955 if (waitqueue_active(&rl->wait[sync]))
956 wake_up(&rl->wait[sync]);
957
958 blk_clear_rl_full(rl, sync);
959 }
960}
961
962/*
963 * A request has just been released. Account for it, update the full and
964 * congestion status, wake up any waiters. Called under q->queue_lock.
965 */
966static void freed_request(struct request_list *rl, bool sync,
967 req_flags_t rq_flags)
968{
969 struct request_queue *q = rl->q;
970
971 q->nr_rqs[sync]--;
972 rl->count[sync]--;
973 if (rq_flags & RQF_ELVPRIV)
974 q->nr_rqs_elvpriv--;
975
976 __freed_request(rl, sync);
977
978 if (unlikely(rl->starved[sync ^ 1]))
979 __freed_request(rl, sync ^ 1);
980}
981
982int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
983{
984 struct request_list *rl;
985 int on_thresh, off_thresh;
986
987 spin_lock_irq(q->queue_lock);
988 q->nr_requests = nr;
989 blk_queue_congestion_threshold(q);
990 on_thresh = queue_congestion_on_threshold(q);
991 off_thresh = queue_congestion_off_threshold(q);
992
993 blk_queue_for_each_rl(rl, q) {
994 if (rl->count[BLK_RW_SYNC] >= on_thresh)
995 blk_set_congested(rl, BLK_RW_SYNC);
996 else if (rl->count[BLK_RW_SYNC] < off_thresh)
997 blk_clear_congested(rl, BLK_RW_SYNC);
998
999 if (rl->count[BLK_RW_ASYNC] >= on_thresh)
1000 blk_set_congested(rl, BLK_RW_ASYNC);
1001 else if (rl->count[BLK_RW_ASYNC] < off_thresh)
1002 blk_clear_congested(rl, BLK_RW_ASYNC);
1003
1004 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
1005 blk_set_rl_full(rl, BLK_RW_SYNC);
1006 } else {
1007 blk_clear_rl_full(rl, BLK_RW_SYNC);
1008 wake_up(&rl->wait[BLK_RW_SYNC]);
1009 }
1010
1011 if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
1012 blk_set_rl_full(rl, BLK_RW_ASYNC);
1013 } else {
1014 blk_clear_rl_full(rl, BLK_RW_ASYNC);
1015 wake_up(&rl->wait[BLK_RW_ASYNC]);
1016 }
1017 }
1018
1019 spin_unlock_irq(q->queue_lock);
1020 return 0;
1021}
1022
1023/*
1024 * Determine if elevator data should be initialized when allocating the
1025 * request associated with @bio.
1026 */
1027static bool blk_rq_should_init_elevator(struct bio *bio)
1028{
1029 if (!bio)
1030 return true;
1031
1032 /*
1033 * Flush requests do not use the elevator so skip initialization.
1034 * This allows a request to share the flush and elevator data.
1035 */
1036 if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
1037 return false;
1038
1039 return true;
1040}
1041
1042/**
1043 * rq_ioc - determine io_context for request allocation
1044 * @bio: request being allocated is for this bio (can be %NULL)
1045 *
1046 * Determine io_context to use for request allocation for @bio. May return
1047 * %NULL if %current->io_context doesn't exist.
1048 */
1049static struct io_context *rq_ioc(struct bio *bio)
1050{
1051#ifdef CONFIG_BLK_CGROUP
1052 if (bio && bio->bi_ioc)
1053 return bio->bi_ioc;
1054#endif
1055 return current->io_context;
1056}
1057
1058/**
1059 * __get_request - get a free request
1060 * @rl: request list to allocate from
1061 * @op: operation and flags
1062 * @bio: bio to allocate request for (can be %NULL)
1063 * @gfp_mask: allocation mask
1064 *
1065 * Get a free request from @q. This function may fail under memory
1066 * pressure or if @q is dead.
1067 *
1068 * Must be called with @q->queue_lock held and,
1069 * Returns ERR_PTR on failure, with @q->queue_lock held.
1070 * Returns request pointer on success, with @q->queue_lock *not held*.
1071 */
1072static struct request *__get_request(struct request_list *rl, unsigned int op,
1073 struct bio *bio, gfp_t gfp_mask)
1074{
1075 struct request_queue *q = rl->q;
1076 struct request *rq;
1077 struct elevator_type *et = q->elevator->type;
1078 struct io_context *ioc = rq_ioc(bio);
1079 struct io_cq *icq = NULL;
1080 const bool is_sync = op_is_sync(op);
1081 int may_queue;
1082 req_flags_t rq_flags = RQF_ALLOCED;
1083
1084 if (unlikely(blk_queue_dying(q)))
1085 return ERR_PTR(-ENODEV);
1086
1087 may_queue = elv_may_queue(q, op);
1088 if (may_queue == ELV_MQUEUE_NO)
1089 goto rq_starved;
1090
1091 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
1092 if (rl->count[is_sync]+1 >= q->nr_requests) {
1093 /*
1094 * The queue will fill after this allocation, so set
1095 * it as full, and mark this process as "batching".
1096 * This process will be allowed to complete a batch of
1097 * requests, others will be blocked.
1098 */
1099 if (!blk_rl_full(rl, is_sync)) {
1100 ioc_set_batching(q, ioc);
1101 blk_set_rl_full(rl, is_sync);
1102 } else {
1103 if (may_queue != ELV_MQUEUE_MUST
1104 && !ioc_batching(q, ioc)) {
1105 /*
1106 * The queue is full and the allocating
1107 * process is not a "batcher", and not
1108 * exempted by the IO scheduler
1109 */
1110 return ERR_PTR(-ENOMEM);
1111 }
1112 }
1113 }
1114 blk_set_congested(rl, is_sync);
1115 }
1116
1117 /*
1118 * Only allow batching queuers to allocate up to 50% over the defined
1119 * limit of requests, otherwise we could have thousands of requests
1120 * allocated with any setting of ->nr_requests
1121 */
1122 if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
1123 return ERR_PTR(-ENOMEM);
1124
1125 q->nr_rqs[is_sync]++;
1126 rl->count[is_sync]++;
1127 rl->starved[is_sync] = 0;
1128
1129 /*
1130 * Decide whether the new request will be managed by elevator. If
1131 * so, mark @rq_flags and increment elvpriv. Non-zero elvpriv will
1132 * prevent the current elevator from being destroyed until the new
1133 * request is freed. This guarantees icq's won't be destroyed and
1134 * makes creating new ones safe.
1135 *
1136 * Also, lookup icq while holding queue_lock. If it doesn't exist,
1137 * it will be created after releasing queue_lock.
1138 */
1139 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
1140 rq_flags |= RQF_ELVPRIV;
1141 q->nr_rqs_elvpriv++;
1142 if (et->icq_cache && ioc)
1143 icq = ioc_lookup_icq(ioc, q);
1144 }
1145
1146 if (blk_queue_io_stat(q))
1147 rq_flags |= RQF_IO_STAT;
1148 spin_unlock_irq(q->queue_lock);
1149
1150 /* allocate and init request */
1151 rq = mempool_alloc(rl->rq_pool, gfp_mask);
1152 if (!rq)
1153 goto fail_alloc;
1154
1155 blk_rq_init(q, rq);
1156 blk_rq_set_rl(rq, rl);
1157 blk_rq_set_prio(rq, ioc);
1158 rq->cmd_flags = op;
1159 rq->rq_flags = rq_flags;
1160
1161 /* init elvpriv */
1162 if (rq_flags & RQF_ELVPRIV) {
1163 if (unlikely(et->icq_cache && !icq)) {
1164 if (ioc)
1165 icq = ioc_create_icq(ioc, q, gfp_mask);
1166 if (!icq)
1167 goto fail_elvpriv;
1168 }
1169
1170 rq->elv.icq = icq;
1171 if (unlikely(elv_set_request(q, rq, bio, gfp_mask)))
1172 goto fail_elvpriv;
1173
1174 /* @rq->elv.icq holds io_context until @rq is freed */
1175 if (icq)
1176 get_io_context(icq->ioc);
1177 }
1178out:
1179 /*
1180 * ioc may be NULL here, and ioc_batching will be false. That's
1181	 * OK; if the queue is under the request limit then requests need
1182 * not count toward the nr_batch_requests limit. There will always
1183 * be some limit enforced by BLK_BATCH_TIME.
1184 */
1185 if (ioc_batching(q, ioc))
1186 ioc->nr_batch_requests--;
1187
1188 trace_block_getrq(q, bio, op);
1189 return rq;
1190
1191fail_elvpriv:
1192 /*
1193 * elvpriv init failed. ioc, icq and elvpriv aren't mempool backed
1194 * and may fail indefinitely under memory pressure and thus
1195 * shouldn't stall IO. Treat this request as !elvpriv. This will
1196	 * disturb iosched and blkcg but weird is better than dead.
1197 */
1198 printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
1199 __func__, dev_name(q->backing_dev_info.dev));
1200
1201 rq->rq_flags &= ~RQF_ELVPRIV;
1202 rq->elv.icq = NULL;
1203
1204 spin_lock_irq(q->queue_lock);
1205 q->nr_rqs_elvpriv--;
1206 spin_unlock_irq(q->queue_lock);
1207 goto out;
1208
1209fail_alloc:
1210 /*
1211 * Allocation failed presumably due to memory. Undo anything we
1212 * might have messed up.
1213 *
1214 * Allocating task should really be put onto the front of the wait
1215 * queue, but this is pretty rare.
1216 */
1217 spin_lock_irq(q->queue_lock);
1218 freed_request(rl, is_sync, rq_flags);
1219
1220 /*
1221	 * In the very unlikely event that allocation failed and no
1222	 * requests for this direction were pending, mark us starved so that
1223	 * freeing of a request in the other direction will notice
1224	 * us. Another possible fix would be to split the rq mempool into
1225	 * READ and WRITE.
1226 */
1227rq_starved:
1228 if (unlikely(rl->count[is_sync] == 0))
1229 rl->starved[is_sync] = 1;
1230 return ERR_PTR(-ENOMEM);
1231}
1232
1233/**
1234 * get_request - get a free request
1235 * @q: request_queue to allocate request from
1236 * @op: operation and flags
1237 * @bio: bio to allocate request for (can be %NULL)
1238 * @gfp_mask: allocation mask
1239 *
1240 * Get a free request from @q. If %__GFP_DIRECT_RECLAIM is set in @gfp_mask,
1241 * this function keeps retrying under memory pressure and fails iff @q is dead.
1242 *
1243 * Must be called with @q->queue_lock held and,
1244 * Returns ERR_PTR on failure, with @q->queue_lock held.
1245 * Returns request pointer on success, with @q->queue_lock *not held*.
1246 */
1247static struct request *get_request(struct request_queue *q, unsigned int op,
1248 struct bio *bio, gfp_t gfp_mask)
1249{
1250 const bool is_sync = op_is_sync(op);
1251 DEFINE_WAIT(wait);
1252 struct request_list *rl;
1253 struct request *rq;
1254
1255 rl = blk_get_rl(q, bio); /* transferred to @rq on success */
1256retry:
1257 rq = __get_request(rl, op, bio, gfp_mask);
1258 if (!IS_ERR(rq))
1259 return rq;
1260
1261 if (!gfpflags_allow_blocking(gfp_mask) || unlikely(blk_queue_dying(q))) {
1262 blk_put_rl(rl);
1263 return rq;
1264 }
1265
1266 /* wait on @rl and retry */
1267 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
1268 TASK_UNINTERRUPTIBLE);
1269
1270 trace_block_sleeprq(q, bio, op);
1271
1272 spin_unlock_irq(q->queue_lock);
1273 io_schedule();
1274
1275 /*
1276 * After sleeping, we become a "batching" process and will be able
1277 * to allocate at least one request, and up to a big batch of them
1278 * for a small period time. See ioc_batching, ioc_set_batching
1279 */
1280 ioc_set_batching(q, current->io_context);
1281
1282 spin_lock_irq(q->queue_lock);
1283 finish_wait(&rl->wait[is_sync], &wait);
1284
1285 goto retry;
1286}
1287
1288static struct request *blk_old_get_request(struct request_queue *q, int rw,
1289 gfp_t gfp_mask)
1290{
1291 struct request *rq;
1292
1293 BUG_ON(rw != READ && rw != WRITE);
1294
1295 /* create ioc upfront */
1296 create_io_context(gfp_mask, q->node);
1297
1298 spin_lock_irq(q->queue_lock);
1299 rq = get_request(q, rw, NULL, gfp_mask);
1300 if (IS_ERR(rq)) {
1301 spin_unlock_irq(q->queue_lock);
1302 return rq;
1303 }
1304
1305 /* q->queue_lock is unlocked at this point */
1306 rq->__data_len = 0;
1307 rq->__sector = (sector_t) -1;
1308 rq->bio = rq->biotail = NULL;
1309 return rq;
1310}
1311
1312struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1313{
1314 if (q->mq_ops)
1315 return blk_mq_alloc_request(q, rw,
1316 (gfp_mask & __GFP_DIRECT_RECLAIM) ?
1317 0 : BLK_MQ_REQ_NOWAIT);
1318 else
1319 return blk_old_get_request(q, rw, gfp_mask);
1320}
1321EXPORT_SYMBOL(blk_get_request);
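
/*
 * Allocation sketch (illustration only): a caller that needs a request
 * outside the normal bio path allocates it here, checks IS_ERR() since
 * failures are returned as ERR_PTR values, and drops it with
 * blk_put_request() when done.
 *
 *	struct request *rq;
 *
 *	rq = blk_get_request(q, READ, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... set up and execute rq, e.g. via blk_execute_rq() ...
 *	blk_put_request(rq);
 */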
1322
1323/**
1324 * blk_rq_set_block_pc - initialize a request to type BLOCK_PC
1325 * @rq: request to be initialized
1326 *
1327 */
1328void blk_rq_set_block_pc(struct request *rq)
1329{
1330 rq->cmd_type = REQ_TYPE_BLOCK_PC;
1331 memset(rq->__cmd, 0, sizeof(rq->__cmd));
1332}
1333EXPORT_SYMBOL(blk_rq_set_block_pc);
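
/*
 * BLOCK_PC sketch (illustration only): SCSI passthrough style users mark
 * a freshly allocated request as BLOCK_PC before filling in the command
 * bytes.
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	blk_rq_set_block_pc(rq);
 *	rq->cmd_len = ...;	// then fill rq->cmd[] with the CDB
 */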
1334
1335/**
1336 * blk_requeue_request - put a request back on queue
1337 * @q: request queue where request should be inserted
1338 * @rq: request to be inserted
1339 *
1340 * Description:
1341 *    Drivers often keep queueing requests until the hardware cannot accept
1342 *    more. When that condition happens we need to put the request back
1343 * on the queue. Must be called with queue lock held.
1344 */
1345void blk_requeue_request(struct request_queue *q, struct request *rq)
1346{
1347 blk_delete_timer(rq);
1348 blk_clear_rq_complete(rq);
1349 trace_block_rq_requeue(q, rq);
1350 wbt_requeue(q->rq_wb, &rq->issue_stat);
1351
1352 if (rq->rq_flags & RQF_QUEUED)
1353 blk_queue_end_tag(q, rq);
1354
1355 BUG_ON(blk_queued_rq(rq));
1356
1357 elv_requeue_request(q, rq);
1358}
1359EXPORT_SYMBOL(blk_requeue_request);
1360
1361static void add_acct_request(struct request_queue *q, struct request *rq,
1362 int where)
1363{
1364 blk_account_io_start(rq, true);
1365 __elv_add_request(q, rq, where);
1366}
1367
1368static void part_round_stats_single(int cpu, struct hd_struct *part,
1369 unsigned long now)
1370{
1371 int inflight;
1372
1373 if (now == part->stamp)
1374 return;
1375
1376 inflight = part_in_flight(part);
1377 if (inflight) {
1378 __part_stat_add(cpu, part, time_in_queue,
1379 inflight * (now - part->stamp));
1380 __part_stat_add(cpu, part, io_ticks, (now - part->stamp));
1381 }
1382 part->stamp = now;
1383}
1384
1385/**
1386 * part_round_stats() - Round off the performance stats on a struct disk_stats.
1387 * @cpu: cpu number for stats access
1388 * @part: target partition
1389 *
1390 * The average IO queue length and utilisation statistics are maintained
1391 * by observing the current state of the queue length and the amount of
1392 * time it has been in this state for.
1393 *
1394 * Normally, that accounting is done on IO completion, but that can result
1395 * in more than a second's worth of IO being accounted for within any one
1396 * second, leading to >100% utilisation. To deal with that, we call this
1397 * function to do a round-off before returning the results when reading
1398 * /proc/diskstats. This accounts immediately for all queue usage up to
1399 * the current jiffies and restarts the counters again.
1400 */
1401void part_round_stats(int cpu, struct hd_struct *part)
1402{
1403 unsigned long now = jiffies;
1404
1405 if (part->partno)
1406 part_round_stats_single(cpu, &part_to_disk(part)->part0, now);
1407 part_round_stats_single(cpu, part, now);
1408}
1409EXPORT_SYMBOL_GPL(part_round_stats);
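
/*
 * Accounting sketch (illustration only): readers of the counters, such as
 * the diskstats code, round off under the per-cpu stat lock before
 * sampling them.
 *
 *	cpu = part_stat_lock();
 *	part_round_stats(cpu, part);
 *	... read the part_stat_read() counters ...
 *	part_stat_unlock();
 */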
1410
1411#ifdef CONFIG_PM
1412static void blk_pm_put_request(struct request *rq)
1413{
1414 if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
1415 pm_runtime_mark_last_busy(rq->q->dev);
1416}
1417#else
1418static inline void blk_pm_put_request(struct request *rq) {}
1419#endif
1420
1421/*
1422 * queue lock must be held
1423 */
1424void __blk_put_request(struct request_queue *q, struct request *req)
1425{
1426 req_flags_t rq_flags = req->rq_flags;
1427
1428 if (unlikely(!q))
1429 return;
1430
1431 if (q->mq_ops) {
1432 blk_mq_free_request(req);
1433 return;
1434 }
1435
1436 blk_pm_put_request(req);
1437
1438 elv_completed_request(q, req);
1439
1440 /* this is a bio leak */
1441 WARN_ON(req->bio != NULL);
1442
1443 wbt_done(q->rq_wb, &req->issue_stat);
1444
1445 /*
1446	 * Request may not have originated from ll_rw_blk. If not,
1447 * it didn't come out of our reserved rq pools
1448 */
1449 if (rq_flags & RQF_ALLOCED) {
1450 struct request_list *rl = blk_rq_rl(req);
1451 bool sync = op_is_sync(req->cmd_flags);
1452
1453 BUG_ON(!list_empty(&req->queuelist));
1454 BUG_ON(ELV_ON_HASH(req));
1455
1456 blk_free_request(rl, req);
1457 freed_request(rl, sync, rq_flags);
1458 blk_put_rl(rl);
1459 }
1460}
1461EXPORT_SYMBOL_GPL(__blk_put_request);
1462
1463void blk_put_request(struct request *req)
1464{
1465 struct request_queue *q = req->q;
1466
1467 if (q->mq_ops)
1468 blk_mq_free_request(req);
1469 else {
1470 unsigned long flags;
1471
1472 spin_lock_irqsave(q->queue_lock, flags);
1473 __blk_put_request(q, req);
1474 spin_unlock_irqrestore(q->queue_lock, flags);
1475 }
1476}
1477EXPORT_SYMBOL(blk_put_request);
1478
1479bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1480 struct bio *bio)
1481{
1482 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1483
1484 if (!ll_back_merge_fn(q, req, bio))
1485 return false;
1486
1487 trace_block_bio_backmerge(q, req, bio);
1488
1489 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1490 blk_rq_set_mixed_merge(req);
1491
1492 req->biotail->bi_next = bio;
1493 req->biotail = bio;
1494 req->__data_len += bio->bi_iter.bi_size;
1495 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1496
1497 blk_account_io_start(req, false);
1498 return true;
1499}
1500
1501bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1502 struct bio *bio)
1503{
1504 const int ff = bio->bi_opf & REQ_FAILFAST_MASK;
1505
1506 if (!ll_front_merge_fn(q, req, bio))
1507 return false;
1508
1509 trace_block_bio_frontmerge(q, req, bio);
1510
1511 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1512 blk_rq_set_mixed_merge(req);
1513
1514 bio->bi_next = req->bio;
1515 req->bio = bio;
1516
1517 req->__sector = bio->bi_iter.bi_sector;
1518 req->__data_len += bio->bi_iter.bi_size;
1519 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1520
1521 blk_account_io_start(req, false);
1522 return true;
1523}
1524
1525/**
1526 * blk_attempt_plug_merge - try to merge with %current's plugged list
1527 * @q: request_queue new bio is being queued at
1528 * @bio: new bio being queued
1529 * @request_count: out parameter for number of traversed plugged requests
1530 * @same_queue_rq: pointer to &struct request that gets filled in when
1531 * another request associated with @q is found on the plug list
1532 * (optional, may be %NULL)
1533 *
1534 * Determine whether @bio being queued on @q can be merged with a request
1535 * on %current's plugged list. Returns %true if merge was successful,
1536 * otherwise %false.
1537 *
1538 * Plugging coalesces IOs from the same issuer for the same purpose without
1539 * going through @q->queue_lock. As such it's more of an issuing mechanism
1540 * than scheduling, and the request, while it may have elvpriv data, is not
1541 * added on the elevator at this point. In addition, we don't have
1542 * reliable access to the elevator outside queue lock. Only check basic
1543 * merging parameters without querying the elevator.
1544 *
1545 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1546 */
1547bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1548 unsigned int *request_count,
1549 struct request **same_queue_rq)
1550{
1551 struct blk_plug *plug;
1552 struct request *rq;
1553 bool ret = false;
1554 struct list_head *plug_list;
1555
1556 plug = current->plug;
1557 if (!plug)
1558 goto out;
1559 *request_count = 0;
1560
1561 if (q->mq_ops)
1562 plug_list = &plug->mq_list;
1563 else
1564 plug_list = &plug->list;
1565
1566 list_for_each_entry_reverse(rq, plug_list, queuelist) {
1567 int el_ret;
1568
1569 if (rq->q == q) {
1570 (*request_count)++;
1571 /*
1572			 * Only the blk-mq multiple hardware queues case checks the
1573			 * rq in the same queue; there should be only one such
1574			 * rq in a queue.
1575			 */
1576 if (same_queue_rq)
1577 *same_queue_rq = rq;
1578 }
1579
1580 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1581 continue;
1582
1583 el_ret = blk_try_merge(rq, bio);
1584 if (el_ret == ELEVATOR_BACK_MERGE) {
1585 ret = bio_attempt_back_merge(q, rq, bio);
1586 if (ret)
1587 break;
1588 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1589 ret = bio_attempt_front_merge(q, rq, bio);
1590 if (ret)
1591 break;
1592 }
1593 }
1594out:
1595 return ret;
1596}
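
/*
 * Plug usage sketch (illustration only): submitters batch bios under a
 * plug so that blk_attempt_plug_merge() above can coalesce them before
 * they reach the queue.
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (...)
 *		submit_bio(bio);	// merged against the plug list when possible
 *	blk_finish_plug(&plug);		// flushes the plugged requests
 */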
1597
1598unsigned int blk_plug_queued_count(struct request_queue *q)
1599{
1600 struct blk_plug *plug;
1601 struct request *rq;
1602 struct list_head *plug_list;
1603 unsigned int ret = 0;
1604
1605 plug = current->plug;
1606 if (!plug)
1607 goto out;
1608
1609 if (q->mq_ops)
1610 plug_list = &plug->mq_list;
1611 else
1612 plug_list = &plug->list;
1613
1614 list_for_each_entry(rq, plug_list, queuelist) {
1615 if (rq->q == q)
1616 ret++;
1617 }
1618out:
1619 return ret;
1620}
1621
1622void init_request_from_bio(struct request *req, struct bio *bio)
1623{
1624 req->cmd_type = REQ_TYPE_FS;
1625 if (bio->bi_opf & REQ_RAHEAD)
1626 req->cmd_flags |= REQ_FAILFAST_MASK;
1627
1628 req->errors = 0;
1629 req->__sector = bio->bi_iter.bi_sector;
1630 if (ioprio_valid(bio_prio(bio)))
1631 req->ioprio = bio_prio(bio);
1632 blk_rq_bio_prep(req->q, req, bio);
1633}
1634
1635static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
1636{
1637 struct blk_plug *plug;
1638 int el_ret, where = ELEVATOR_INSERT_SORT;
1639 struct request *req;
1640 unsigned int request_count = 0;
1641 unsigned int wb_acct;
1642
1643 /*
1644 * low level driver can indicate that it wants pages above a
1645 * certain limit bounced to low memory (ie for highmem, or even
1646 * ISA dma in theory)
1647 */
1648 blk_queue_bounce(q, &bio);
1649
1650 blk_queue_split(q, &bio, q->bio_split);
1651
1652 if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
1653 bio->bi_error = -EIO;
1654 bio_endio(bio);
1655 return BLK_QC_T_NONE;
1656 }
1657
1658 if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
1659 spin_lock_irq(q->queue_lock);
1660 where = ELEVATOR_INSERT_FLUSH;
1661 goto get_rq;
1662 }
1663
1664 /*
1665 * Check if we can merge with the plugged list before grabbing
1666 * any locks.
1667 */
1668 if (!blk_queue_nomerges(q)) {
1669 if (blk_attempt_plug_merge(q, bio, &request_count, NULL))
1670 return BLK_QC_T_NONE;
1671 } else
1672 request_count = blk_plug_queued_count(q);
1673
1674 spin_lock_irq(q->queue_lock);
1675
1676 el_ret = elv_merge(q, &req, bio);
1677 if (el_ret == ELEVATOR_BACK_MERGE) {
1678 if (bio_attempt_back_merge(q, req, bio)) {
1679 elv_bio_merged(q, req, bio);
1680 if (!attempt_back_merge(q, req))
1681 elv_merged_request(q, req, el_ret);
1682 goto out_unlock;
1683 }
1684 } else if (el_ret == ELEVATOR_FRONT_MERGE) {
1685 if (bio_attempt_front_merge(q, req, bio)) {
1686 elv_bio_merged(q, req, bio);
1687 if (!attempt_front_merge(q, req))
1688 elv_merged_request(q, req, el_ret);
1689 goto out_unlock;
1690 }
1691 }
1692
1693get_rq:
1694 wb_acct = wbt_wait(q->rq_wb, bio, q->queue_lock);
1695
1696 /*
1697	 * Grab a free request. This might sleep but cannot fail.
1698 * Returns with the queue unlocked.
1699 */
1700 req = get_request(q, bio->bi_opf, bio, GFP_NOIO);
1701 if (IS_ERR(req)) {
1702 __wbt_done(q->rq_wb, wb_acct);
1703 bio->bi_error = PTR_ERR(req);
1704 bio_endio(bio);
1705 goto out_unlock;
1706 }
1707
1708 wbt_track(&req->issue_stat, wb_acct);
1709
1710 /*
1711 * After dropping the lock and possibly sleeping here, our request
1712 * may now be mergeable after it had proven unmergeable (above).
1713 * We don't worry about that case for efficiency. It won't happen
1714 * often, and the elevators are able to handle it.
1715 */
1716 init_request_from_bio(req, bio);
1717
1718 if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
1719 req->cpu = raw_smp_processor_id();
1720
1721 plug = current->plug;
1722 if (plug) {
1723 /*
1724		 * If this is the first request added after a plug, fire
1725		 * off a plug trace.
1726		 *
1727		 * @request_count may become stale because the task may have been
1728		 * scheduled out, so check the plug list again.
1729 */
1730 if (!request_count || list_empty(&plug->list))
1731 trace_block_plug(q);
1732 else {
1733 struct request *last = list_entry_rq(plug->list.prev);
1734 if (request_count >= BLK_MAX_REQUEST_COUNT ||
1735 blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE) {
1736 blk_flush_plug_list(plug, false);
1737 trace_block_plug(q);
1738 }
1739 }
1740 list_add_tail(&req->queuelist, &plug->list);
1741 blk_account_io_start(req, true);
1742 } else {
1743 spin_lock_irq(q->queue_lock);
1744 add_acct_request(q, req, where);
1745 __blk_run_queue(q);
1746out_unlock:
1747 spin_unlock_irq(q->queue_lock);
1748 }
1749
1750 return BLK_QC_T_NONE;
1751}
1752
1753/*
1754 * If bio->bi_bdev is a partition, remap the location
1755 */
1756static inline void blk_partition_remap(struct bio *bio)
1757{
1758 struct block_device *bdev = bio->bi_bdev;
1759
1760 /*
1761 * Zone reset does not include bi_size so bio_sectors() is always 0.
1762 * Include a test for the reset op code and perform the remap if needed.
1763 */
1764 if (bdev != bdev->bd_contains &&
1765 (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
1766 struct hd_struct *p = bdev->bd_part;
1767
1768 bio->bi_iter.bi_sector += p->start_sect;
1769 bio->bi_bdev = bdev->bd_contains;
1770
1771 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1772 bdev->bd_dev,
1773 bio->bi_iter.bi_sector - p->start_sect);
1774 }
1775}
1776
1777static void handle_bad_sector(struct bio *bio)
1778{
1779 char b[BDEVNAME_SIZE];
1780
1781 printk(KERN_INFO "attempt to access beyond end of device\n");
1782 printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
1783 bdevname(bio->bi_bdev, b),
1784 bio->bi_opf,
1785 (unsigned long long)bio_end_sector(bio),
1786 (long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
1787}
1788
1789#ifdef CONFIG_FAIL_MAKE_REQUEST
1790
1791static DECLARE_FAULT_ATTR(fail_make_request);
1792
1793static int __init setup_fail_make_request(char *str)
1794{
1795 return setup_fault_attr(&fail_make_request, str);
1796}
1797__setup("fail_make_request=", setup_fail_make_request);
1798
1799static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
1800{
1801 return part->make_it_fail && should_fail(&fail_make_request, bytes);
1802}
1803
1804static int __init fail_make_request_debugfs(void)
1805{
1806 struct dentry *dir = fault_create_debugfs_attr("fail_make_request",
1807 NULL, &fail_make_request);
1808
1809 return PTR_ERR_OR_ZERO(dir);
1810}
1811
1812late_initcall(fail_make_request_debugfs);
1813
1814#else /* CONFIG_FAIL_MAKE_REQUEST */
1815
1816static inline bool should_fail_request(struct hd_struct *part,
1817 unsigned int bytes)
1818{
1819 return false;
1820}
1821
1822#endif /* CONFIG_FAIL_MAKE_REQUEST */
1823
1824/*
1825 * Check whether this bio extends beyond the end of the device.
1826 */
1827static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1828{
1829 sector_t maxsector;
1830
1831 if (!nr_sectors)
1832 return 0;
1833
1834 /* Test device or partition size, when known. */
1835 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1836 if (maxsector) {
1837 sector_t sector = bio->bi_iter.bi_sector;
1838
1839 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1840 /*
1841 * This may well happen - the kernel calls bread()
1842 * without checking the size of the device, e.g., when
1843 * mounting a device.
1844 */
1845 handle_bad_sector(bio);
1846 return 1;
1847 }
1848 }
1849
1850 return 0;
1851}
1852
1853static noinline_for_stack bool
1854generic_make_request_checks(struct bio *bio)
1855{
1856 struct request_queue *q;
1857 int nr_sectors = bio_sectors(bio);
1858 int err = -EIO;
1859 char b[BDEVNAME_SIZE];
1860 struct hd_struct *part;
1861
1862 might_sleep();
1863
1864 if (bio_check_eod(bio, nr_sectors))
1865 goto end_io;
1866
1867 q = bdev_get_queue(bio->bi_bdev);
1868 if (unlikely(!q)) {
1869 printk(KERN_ERR
1870 "generic_make_request: Trying to access "
1871 "nonexistent block-device %s (%Lu)\n",
1872 bdevname(bio->bi_bdev, b),
1873 (long long) bio->bi_iter.bi_sector);
1874 goto end_io;
1875 }
1876
1877 part = bio->bi_bdev->bd_part;
1878 if (should_fail_request(part, bio->bi_iter.bi_size) ||
1879 should_fail_request(&part_to_disk(part)->part0,
1880 bio->bi_iter.bi_size))
1881 goto end_io;
1882
1883 /*
1884 * If this device has partitions, remap block n
1885 * of partition p to block n+start(p) of the disk.
1886 */
1887 blk_partition_remap(bio);
1888
1889 if (bio_check_eod(bio, nr_sectors))
1890 goto end_io;
1891
1892 /*
1893	 * Filter flush bios early so that make_request based
1894 * drivers without flush support don't have to worry
1895 * about them.
1896 */
1897 if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
1898 !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
1899 bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
1900 if (!nr_sectors) {
1901 err = 0;
1902 goto end_io;
1903 }
1904 }
1905
1906 switch (bio_op(bio)) {
1907 case REQ_OP_DISCARD:
1908 if (!blk_queue_discard(q))
1909 goto not_supported;
1910 break;
1911 case REQ_OP_SECURE_ERASE:
1912 if (!blk_queue_secure_erase(q))
1913 goto not_supported;
1914 break;
1915 case REQ_OP_WRITE_SAME:
1916 if (!bdev_write_same(bio->bi_bdev))
1917 goto not_supported;
1918 break;
1919 case REQ_OP_ZONE_REPORT:
1920 case REQ_OP_ZONE_RESET:
1921 if (!bdev_is_zoned(bio->bi_bdev))
1922 goto not_supported;
1923 break;
1924 case REQ_OP_WRITE_ZEROES:
1925 if (!bdev_write_zeroes_sectors(bio->bi_bdev))
1926 goto not_supported;
1927 break;
1928 default:
1929 break;
1930 }
1931
1932 /*
1933 * Various block parts want %current->io_context and lazy ioc
1934 * allocation ends up trading a lot of pain for a small amount of
1935	 * memory. Just allocate it upfront. This may fail and the block
1936	 * layer knows how to live with it.
1937 */
1938 create_io_context(GFP_ATOMIC, q->node);
1939
1940 if (!blkcg_bio_issue_check(q, bio))
1941 return false;
1942
1943 trace_block_bio_queue(q, bio);
1944 return true;
1945
1946not_supported:
1947 err = -EOPNOTSUPP;
1948end_io:
1949 bio->bi_error = err;
1950 bio_endio(bio);
1951 return false;
1952}
1953
1954/**
1955 * generic_make_request - hand a buffer to its device driver for I/O
1956 * @bio: The bio describing the location in memory and on the device.
1957 *
1958 * generic_make_request() is used to make I/O requests of block
1959 * devices. It is passed a &struct bio, which describes the I/O that needs
1960 * to be done.
1961 *
1962 * generic_make_request() does not return any status. The
1963 * success/failure status of the request, along with notification of
1964 * completion, is delivered asynchronously through the bio->bi_end_io
1965 * function described (one day) elsewhere.
1966 *
1967 * The caller of generic_make_request must make sure that bi_io_vec
1968 * is set to describe the memory buffer, that bi_bdev and bi_iter.bi_sector
1969 * are set to describe the device address, and that the
1970 * bi_end_io and optionally bi_private are set to describe how
1971 * completion notification should be signaled.
1972 *
1973 * generic_make_request and the drivers it calls may use bi_next if this
1974 * bio happens to be merged with someone else, and may resubmit the bio to
1975 * a lower device by calling into generic_make_request recursively, which
1976 * means the bio should NOT be touched after the call to ->make_request_fn.
1977 */
1978blk_qc_t generic_make_request(struct bio *bio)
1979{
1980 /*
1981 * bio_list_on_stack[0] contains bios submitted by the current
1982 * make_request_fn.
1983 * bio_list_on_stack[1] contains bios that were submitted before
1984 * the current make_request_fn, but that haven't been processed
1985 * yet.
1986 */
1987 struct bio_list bio_list_on_stack[2];
1988 blk_qc_t ret = BLK_QC_T_NONE;
1989
1990 if (!generic_make_request_checks(bio))
1991 goto out;
1992
1993 /*
1994 * We only want one ->make_request_fn to be active at a time, else
1995 * stack usage with stacked devices could be a problem. So use
1996	 * current->bio_list to keep a list of requests submitted by a
1997 * make_request_fn function. current->bio_list is also used as a
1998 * flag to say if generic_make_request is currently active in this
1999 * task or not. If it is NULL, then no make_request is active. If
2000 * it is non-NULL, then a make_request is active, and new requests
2001 * should be added at the tail
2002 */
2003 if (current->bio_list) {
2004		bio_list_add(&current->bio_list[0], bio);
2005 goto out;
2006 }
2007
2008 /* following loop may be a bit non-obvious, and so deserves some
2009 * explanation.
2010 * Before entering the loop, bio->bi_next is NULL (as all callers
2011 * ensure that) so we have a list with a single bio.
2012 * We pretend that we have just taken it off a longer list, so
2013 * we assign bio_list to a pointer to the bio_list_on_stack,
2014 * thus initialising the bio_list of new bios to be
2015 * added. ->make_request() may indeed add some more bios
2016 * through a recursive call to generic_make_request. If it
2017 * did, we find a non-NULL value in bio_list and re-enter the loop
2018 * from the top. In this case we really did just take the bio
2019	 * off the top of the list (no pretending) and so remove it from
2020 * bio_list, and call into ->make_request() again.
2021 */
2022 BUG_ON(bio->bi_next);
2023 bio_list_init(&bio_list_on_stack[0]);
2024 current->bio_list = bio_list_on_stack;
2025 do {
2026 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2027
2028 if (likely(blk_queue_enter(q, false) == 0)) {
2029 struct bio_list lower, same;
2030
2031 /* Create a fresh bio_list for all subordinate requests */
2032 bio_list_on_stack[1] = bio_list_on_stack[0];
2033 bio_list_init(&bio_list_on_stack[0]);
2034 ret = q->make_request_fn(q, bio);
2035
2036 blk_queue_exit(q);
2037
2038 /* sort new bios into those for a lower level
2039 * and those for the same level
2040 */
2041 bio_list_init(&lower);
2042 bio_list_init(&same);
2043 while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
2044 if (q == bdev_get_queue(bio->bi_bdev))
2045 bio_list_add(&same, bio);
2046 else
2047 bio_list_add(&lower, bio);
2048 /* now assemble so we handle the lowest level first */
2049 bio_list_merge(&bio_list_on_stack[0], &lower);
2050 bio_list_merge(&bio_list_on_stack[0], &same);
2051 bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
2052 } else {
2053 bio_io_error(bio);
2054 }
2055 bio = bio_list_pop(&bio_list_on_stack[0]);
2056 } while (bio);
2057 current->bio_list = NULL; /* deactivate */
2058
2059out:
2060 return ret;
2061}
2062EXPORT_SYMBOL(generic_make_request);
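
/*
 * Caller-side sketch (illustration only, my_end_io and my_cookie are
 * made-up names): the fields required by the kernel-doc above are set
 * before the bio is handed over, and the bio is not touched again
 * afterwards.
 *
 *	bio = bio_alloc(GFP_NOIO, 1);
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	bio->bi_end_io = my_end_io;	// completion callback
 *	bio->bi_private = my_cookie;
 *	generic_make_request(bio);	// do not touch bio after this
 */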
2063
2064/**
2065 * submit_bio - submit a bio to the block device layer for I/O
2066 * @bio: The &struct bio which describes the I/O
2067 *
2068 * submit_bio() is very similar in purpose to generic_make_request(), and
2069 * uses that function to do most of the work. Both are fairly rough
2070 * interfaces; @bio must be presetup and ready for I/O.
2071 *
2072 */
2073blk_qc_t submit_bio(struct bio *bio)
2074{
2075 /*
2076 * If it's a regular read/write or a barrier with data attached,
2077 * go through the normal accounting stuff before submission.
2078 */
2079 if (bio_has_data(bio)) {
2080 unsigned int count;
2081
2082 if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
2083 count = bdev_logical_block_size(bio->bi_bdev) >> 9;
2084 else
2085 count = bio_sectors(bio);
2086
2087 if (op_is_write(bio_op(bio))) {
2088 count_vm_events(PGPGOUT, count);
2089 } else {
2090 task_io_account_read(bio->bi_iter.bi_size);
2091 count_vm_events(PGPGIN, count);
2092 }
2093
2094 if (unlikely(block_dump)) {
2095 char b[BDEVNAME_SIZE];
2096 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
2097 current->comm, task_pid_nr(current),
2098 op_is_write(bio_op(bio)) ? "WRITE" : "READ",
2099 (unsigned long long)bio->bi_iter.bi_sector,
2100 bdevname(bio->bi_bdev, b),
2101 count);
2102 }
2103 }
2104
2105 return generic_make_request(bio);
2106}
2107EXPORT_SYMBOL(submit_bio);
2108
2109/**
2110 * blk_cloned_rq_check_limits - Helper function to check a cloned request
2111 *                              for the new queue limits
2112 * @q: the queue
2113 * @rq: the request being checked
2114 *
2115 * Description:
2116 * @rq may have been made based on weaker limitations of upper-level queues
2117 * in request stacking drivers, and it may violate the limitation of @q.
2118 * Since the block layer and the underlying device driver trust @rq
2119 * after it is inserted to @q, it should be checked against @q before
2120 * the insertion using this generic function.
2121 *
2122 * Request stacking drivers like request-based dm may change the queue
2123 * limits when retrying requests on other queues. Those requests need
2124 * to be checked against the new queue limits again during dispatch.
2125 */
2126static int blk_cloned_rq_check_limits(struct request_queue *q,
2127 struct request *rq)
2128{
2129 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
2130 printk(KERN_ERR "%s: over max size limit.\n", __func__);
2131 return -EIO;
2132 }
2133
2134 /*
2135 * queue's settings related to segment counting like q->bounce_pfn
2136 * may differ from that of other stacking queues.
2137 * Recalculate it to check the request correctly on this queue's
2138 * limitation.
2139 */
2140 blk_recalc_rq_segments(rq);
2141 if (rq->nr_phys_segments > queue_max_segments(q)) {
2142 printk(KERN_ERR "%s: over max segments limit.\n", __func__);
2143 return -EIO;
2144 }
2145
2146 return 0;
2147}
2148
2149/**
2150 * blk_insert_cloned_request - Helper for stacking drivers to submit a request
2151 * @q: the queue to submit the request
2152 * @rq: the request being queued
2153 */
2154int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
2155{
2156 unsigned long flags;
2157 int where = ELEVATOR_INSERT_BACK;
2158
2159 if (blk_cloned_rq_check_limits(q, rq))
2160 return -EIO;
2161
2162 if (rq->rq_disk &&
2163 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
2164 return -EIO;
2165
2166 if (q->mq_ops) {
2167 if (blk_queue_io_stat(q))
2168 blk_account_io_start(rq, true);
2169 blk_mq_insert_request(rq, false, true, false);
2170 return 0;
2171 }
2172
2173 spin_lock_irqsave(q->queue_lock, flags);
2174 if (unlikely(blk_queue_dying(q))) {
2175 spin_unlock_irqrestore(q->queue_lock, flags);
2176 return -ENODEV;
2177 }
2178
2179 /*
2180	 * The request being submitted must be dequeued before calling this
2181	 * function because it will be linked to another request_queue.
2182 */
2183 BUG_ON(blk_queued_rq(rq));
2184
2185 if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
2186 where = ELEVATOR_INSERT_FLUSH;
2187
2188 add_acct_request(q, rq, where);
2189 if (where == ELEVATOR_INSERT_FLUSH)
2190 __blk_run_queue(q);
2191 spin_unlock_irqrestore(q->queue_lock, flags);
2192
2193 return 0;
2194}
2195EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
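
/*
 * Stacking-driver sketch (illustration only): a request-based dm style
 * driver hands a clone prepared for the lower device to
 * blk_insert_cloned_request(), which re-checks it against the lower
 * queue's limits before inserting it.
 *
 *	ret = blk_insert_cloned_request(lower_q, clone);
 *	if (ret)
 *		... requeue or fail the original request ...
 */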
2196
2197/**
2198 * blk_rq_err_bytes - determine number of bytes till the next failure boundary
2199 * @rq: request to examine
2200 *
2201 * Description:
2202 *     A request could be a merge of IOs which require different failure
2203 *     handling. This function determines the number of bytes which
2204 *     can be failed from the beginning of the request without
2205 *     crossing into an area which needs to be retried further.
2206 *
2207 * Return:
2208 * The number of bytes to fail.
2209 *
2210 * Context:
2211 * queue_lock must be held.
2212 */
2213unsigned int blk_rq_err_bytes(const struct request *rq)
2214{
2215 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
2216 unsigned int bytes = 0;
2217 struct bio *bio;
2218
2219 if (!(rq->rq_flags & RQF_MIXED_MERGE))
2220 return blk_rq_bytes(rq);
2221
2222 /*
2223 * Currently the only 'mixing' which can happen is between
2224	 * different failfast types. We can safely fail portions
2225 * which have all the failfast bits that the first one has -
2226 * the ones which are at least as eager to fail as the first
2227 * one.
2228 */
2229 for (bio = rq->bio; bio; bio = bio->bi_next) {
2230 if ((bio->bi_opf & ff) != ff)
2231 break;
2232 bytes += bio->bi_iter.bi_size;
2233 }
2234
2235 /* this could lead to infinite loop */
2236 BUG_ON(blk_rq_bytes(rq) && !bytes);
2237 return bytes;
2238}
2239EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2240
2241void blk_account_io_completion(struct request *req, unsigned int bytes)
2242{
2243 if (blk_do_io_stat(req)) {
2244 const int rw = rq_data_dir(req);
2245 struct hd_struct *part;
2246 int cpu;
2247
2248 cpu = part_stat_lock();
2249 part = req->part;
2250 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2251 part_stat_unlock();
2252 }
2253}
2254
2255void blk_account_io_done(struct request *req)
2256{
2257 /*
2258 * Account IO completion. flush_rq isn't accounted as a
2259 * normal IO on queueing nor completion. Accounting the
2260 * containing request is enough.
2261 */
2262 if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
2263 unsigned long duration = jiffies - req->start_time;
2264 const int rw = rq_data_dir(req);
2265 struct hd_struct *part;
2266 int cpu;
2267
2268 cpu = part_stat_lock();
2269 part = req->part;
2270
2271 part_stat_inc(cpu, part, ios[rw]);
2272 part_stat_add(cpu, part, ticks[rw], duration);
2273 part_round_stats(cpu, part);
2274 part_dec_in_flight(part, rw);
2275
2276 hd_struct_put(part);
2277 part_stat_unlock();
2278 }
2279}
2280
2281#ifdef CONFIG_PM
2282/*
2283 * Don't process normal requests when queue is suspended
2284 * or in the process of suspending/resuming
2285 */
2286static struct request *blk_pm_peek_request(struct request_queue *q,
2287 struct request *rq)
2288{
2289 if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
2290 (q->rpm_status != RPM_ACTIVE && !(rq->rq_flags & RQF_PM))))
2291 return NULL;
2292 else
2293 return rq;
2294}
2295#else
2296static inline struct request *blk_pm_peek_request(struct request_queue *q,
2297 struct request *rq)
2298{
2299 return rq;
2300}
2301#endif
2302
2303void blk_account_io_start(struct request *rq, bool new_io)
2304{
2305 struct hd_struct *part;
2306 int rw = rq_data_dir(rq);
2307 int cpu;
2308
2309 if (!blk_do_io_stat(rq))
2310 return;
2311
2312 cpu = part_stat_lock();
2313
2314 if (!new_io) {
2315 part = rq->part;
2316 part_stat_inc(cpu, part, merges[rw]);
2317 } else {
2318 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2319 if (!hd_struct_try_get(part)) {
2320 /*
2321 * The partition is already being removed,
2322 * the request will be accounted on the disk only
2323 *
2324 * We take a reference on disk->part0 although that
2325 * partition will never be deleted, so we can treat
2326 * it as any other partition.
2327 */
2328 part = &rq->rq_disk->part0;
2329 hd_struct_get(part);
2330 }
2331 part_round_stats(cpu, part);
2332 part_inc_in_flight(part, rw);
2333 rq->part = part;
2334 }
2335
2336 part_stat_unlock();
2337}
2338
2339/**
2340 * blk_peek_request - peek at the top of a request queue
2341 * @q: request queue to peek at
2342 *
2343 * Description:
2344 * Return the request at the top of @q. The returned request
2345 * should be started using blk_start_request() before LLD starts
2346 * processing it.
2347 *
2348 * Return:
2349 * Pointer to the request at the top of @q if available. Null
2350 * otherwise.
2351 *
2352 * Context:
2353 * queue_lock must be held.
2354 */
2355struct request *blk_peek_request(struct request_queue *q)
2356{
2357 struct request *rq;
2358 int ret;
2359
2360 while ((rq = __elv_next_request(q)) != NULL) {
2361
2362 rq = blk_pm_peek_request(q, rq);
2363 if (!rq)
2364 break;
2365
2366 if (!(rq->rq_flags & RQF_STARTED)) {
2367 /*
2368 * This is the first time the device driver
2369 * sees this request (possibly after
2370 * requeueing). Notify IO scheduler.
2371 */
2372 if (rq->rq_flags & RQF_SORTED)
2373 elv_activate_rq(q, rq);
2374
2375 /*
2376			 * Just mark it as started even if we don't start
2377			 * it; a request that has been delayed should
2378			 * not be passed by new incoming requests.
2379 */
2380 rq->rq_flags |= RQF_STARTED;
2381 trace_block_rq_issue(q, rq);
2382 }
2383
2384 if (!q->boundary_rq || q->boundary_rq == rq) {
2385 q->end_sector = rq_end_sector(rq);
2386 q->boundary_rq = NULL;
2387 }
2388
2389 if (rq->rq_flags & RQF_DONTPREP)
2390 break;
2391
2392 if (q->dma_drain_size && blk_rq_bytes(rq)) {
2393 /*
2394			 * Make sure space for the drain appears. We
2395			 * know we can do this because max_hw_segments
2396			 * has been adjusted to be one fewer than the
2397			 * device can handle.
2398 */
2399 rq->nr_phys_segments++;
2400 }
2401
2402 if (!q->prep_rq_fn)
2403 break;
2404
2405 ret = q->prep_rq_fn(q, rq);
2406 if (ret == BLKPREP_OK) {
2407 break;
2408 } else if (ret == BLKPREP_DEFER) {
2409 /*
2410 * the request may have been (partially) prepped.
2411 * we need to keep this request in the front to
2412 * avoid resource deadlock. RQF_STARTED will
2413 * prevent other fs requests from passing this one.
2414 */
2415 if (q->dma_drain_size && blk_rq_bytes(rq) &&
2416 !(rq->rq_flags & RQF_DONTPREP)) {
2417 /*
2418 * remove the space for the drain we added
2419 * so that we don't add it again
2420 */
2421 --rq->nr_phys_segments;
2422 }
2423
2424 rq = NULL;
2425 break;
2426 } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
2427 int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
2428
2429 rq->rq_flags |= RQF_QUIET;
2430 /*
2431 * Mark this request as started so we don't trigger
2432 * any debug logic in the end I/O path.
2433 */
2434 blk_start_request(rq);
2435 __blk_end_request_all(rq, err);
2436 } else {
2437 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
2438 break;
2439 }
2440 }
2441
2442 return rq;
2443}
2444EXPORT_SYMBOL(blk_peek_request);
2445
2446void blk_dequeue_request(struct request *rq)
2447{
2448 struct request_queue *q = rq->q;
2449
2450 BUG_ON(list_empty(&rq->queuelist));
2451 BUG_ON(ELV_ON_HASH(rq));
2452
2453 list_del_init(&rq->queuelist);
2454
2455 /*
2456	 * The time frame between a request being removed from the lists
2457	 * and when it is freed is accounted as IO that is in progress at
2458	 * the driver side.
2459 */
2460 if (blk_account_rq(rq)) {
2461 q->in_flight[rq_is_sync(rq)]++;
2462 set_io_start_time_ns(rq);
2463 }
2464}
2465
2466/**
2467 * blk_start_request - start request processing on the driver
2468 * @req: request to dequeue
2469 *
2470 * Description:
2471 * Dequeue @req and start timeout timer on it. This hands off the
2472 * request to the driver.
2473 *
2474 * Block internal functions which don't want to start timer should
2475 * call blk_dequeue_request().
2476 *
2477 * Context:
2478 * queue_lock must be held.
2479 */
2480void blk_start_request(struct request *req)
2481{
2482 blk_dequeue_request(req);
2483
2484 if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
2485 blk_stat_set_issue_time(&req->issue_stat);
2486 req->rq_flags |= RQF_STATS;
2487 wbt_issue(req->q->rq_wb, &req->issue_stat);
2488 }
2489
2490 /*
2491 * We are now handing the request to the hardware, initialize
2492 * resid_len to full count and add the timeout handler.
2493 */
2494 req->resid_len = blk_rq_bytes(req);
2495 if (unlikely(blk_bidi_rq(req)))
2496 req->next_rq->resid_len = blk_rq_bytes(req->next_rq);
2497
2498 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
2499 blk_add_timer(req);
2500}
2501EXPORT_SYMBOL(blk_start_request);
2502
2503/**
2504 * blk_fetch_request - fetch a request from a request queue
2505 * @q: request queue to fetch a request from
2506 *
2507 * Description:
2508 * Return the request at the top of @q. The request is started on
2509 * return and LLD can start processing it immediately.
2510 *
2511 * Return:
2512 * Pointer to the request at the top of @q if available. Null
2513 * otherwise.
2514 *
2515 * Context:
2516 * queue_lock must be held.
2517 */
2518struct request *blk_fetch_request(struct request_queue *q)
2519{
2520 struct request *rq;
2521
2522 rq = blk_peek_request(q);
2523 if (rq)
2524 blk_start_request(rq);
2525 return rq;
2526}
2527EXPORT_SYMBOL(blk_fetch_request);
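
/*
 * request_fn sketch (illustration only, my_request_fn and my_do_transfer
 * are made-up names): a simple non-mq driver drains the queue with
 * blk_fetch_request() and completes each request while still holding
 * q->queue_lock.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			int err = my_do_transfer(rq);
 *			__blk_end_request_all(rq, err);
 *		}
 *	}
 */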
2528
2529/**
2530 * blk_update_request - Special helper function for request stacking drivers
2531 * @req: the request being processed
2532 * @error: %0 for success, < %0 for error
2533 * @nr_bytes: number of bytes to complete @req
2534 *
2535 * Description:
2536 * Ends I/O on a number of bytes attached to @req, but doesn't complete
2537 * the request structure even if @req doesn't have leftover.
2538 * If @req has leftover, sets it up for the next range of segments.
2539 *
2540 * This special helper function is only for request stacking drivers
2541 * (e.g. request-based dm) so that they can handle partial completion.
2542 * Actual device drivers should use blk_end_request instead.
2543 *
2544 * Passing the result of blk_rq_bytes() as @nr_bytes guarantees
2545 * %false return from this function.
2546 *
2547 * Return:
2548 * %false - this request doesn't have any more data
2549 * %true - this request has more data
2550 **/
2551bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2552{
2553 int total_bytes;
2554
2555 trace_block_rq_complete(req->q, req, nr_bytes);
2556
2557 if (!req->bio)
2558 return false;
2559
2560 /*
2561 * For fs requests, rq is just carrier of independent bio's
2562 * and each partial completion should be handled separately.
2563 * Reset per-request error on each partial completion.
2564 *
2565 * TODO: tj: This is too subtle. It would be better to let
2566 * low level drivers do what they see fit.
2567 */
2568 if (req->cmd_type == REQ_TYPE_FS)
2569 req->errors = 0;
2570
2571 if (error && req->cmd_type == REQ_TYPE_FS &&
2572 !(req->rq_flags & RQF_QUIET)) {
2573 char *error_type;
2574
2575 switch (error) {
2576 case -ENOLINK:
2577 error_type = "recoverable transport";
2578 break;
2579 case -EREMOTEIO:
2580 error_type = "critical target";
2581 break;
2582 case -EBADE:
2583 error_type = "critical nexus";
2584 break;
2585 case -ETIMEDOUT:
2586 error_type = "timeout";
2587 break;
2588 case -ENOSPC:
2589 error_type = "critical space allocation";
2590 break;
2591 case -ENODATA:
2592 error_type = "critical medium";
2593 break;
2594 case -EIO:
2595 default:
2596 error_type = "I/O";
2597 break;
2598 }
2599 printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
2600 __func__, error_type, req->rq_disk ?
2601 req->rq_disk->disk_name : "?",
2602 (unsigned long long)blk_rq_pos(req));
2603
2604 }
2605
2606 blk_account_io_completion(req, nr_bytes);
2607
2608 total_bytes = 0;
2609 while (req->bio) {
2610 struct bio *bio = req->bio;
2611 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
2612
2613 if (bio_bytes == bio->bi_iter.bi_size)
2614 req->bio = bio->bi_next;
2615
2616 req_bio_endio(req, bio, bio_bytes, error);
2617
2618 total_bytes += bio_bytes;
2619 nr_bytes -= bio_bytes;
2620
2621 if (!nr_bytes)
2622 break;
2623 }
2624
2625 /*
2626 * completely done
2627 */
2628 if (!req->bio) {
2629 /*
2630 * Reset counters so that the request stacking driver
2631 * can find how many bytes remain in the request
2632 * later.
2633 */
2634 req->__data_len = 0;
2635 return false;
2636 }
2637
2638 WARN_ON_ONCE(req->rq_flags & RQF_SPECIAL_PAYLOAD);
2639
2640 req->__data_len -= total_bytes;
2641
2642 /* update sector only for requests with clear definition of sector */
2643 if (req->cmd_type == REQ_TYPE_FS)
2644 req->__sector += total_bytes >> 9;
2645
2646 /* mixed attributes always follow the first bio */
2647 if (req->rq_flags & RQF_MIXED_MERGE) {
2648 req->cmd_flags &= ~REQ_FAILFAST_MASK;
2649 req->cmd_flags |= req->bio->bi_opf & REQ_FAILFAST_MASK;
2650 }
2651
2652 /*
2653 * If total number of sectors is less than the first segment
2654 * size, something has gone terribly wrong.
2655 */
2656 if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
2657 blk_dump_rq_flags(req, "request botched");
2658 req->__data_len = blk_rq_cur_bytes(req);
2659 }
2660
2661 /* recalculate the number of segments */
2662 blk_recalc_rq_segments(req);
2663
2664 return true;
2665}
2666EXPORT_SYMBOL_GPL(blk_update_request);
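
/*
 * Partial-completion sketch (illustration only): a stacking driver that
 * learns that only @done bytes of @rq finished calls blk_update_request()
 * and finishes the request only once it reports no more pending data.
 *
 *	if (!blk_update_request(rq, error, done)) {
 *		// every byte is accounted for; finish under queue_lock
 *		blk_finish_request(rq, error);
 *	}
 *	// otherwise rq has been set up for its remaining segments
 */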
2667
2668static bool blk_update_bidi_request(struct request *rq, int error,
2669 unsigned int nr_bytes,
2670 unsigned int bidi_bytes)
2671{
2672 if (blk_update_request(rq, error, nr_bytes))
2673 return true;
2674
2675 /* Bidi request must be completed as a whole */
2676 if (unlikely(blk_bidi_rq(rq)) &&
2677 blk_update_request(rq->next_rq, error, bidi_bytes))
2678 return true;
2679
2680 if (blk_queue_add_random(rq->q))
2681 add_disk_randomness(rq->rq_disk);
2682
2683 return false;
2684}
2685
2686/**
2687 * blk_unprep_request - unprepare a request
2688 * @req: the request
2689 *
2690 * This function makes a request ready for complete resubmission (or
2691 * completion). It happens only after all error handling is complete,
2692 * so represents the appropriate moment to deallocate any resources
2693 * that were allocated to the request in the prep_rq_fn. The queue
2694 * lock is held when calling this.
2695 */
2696void blk_unprep_request(struct request *req)
2697{
2698 struct request_queue *q = req->q;
2699
2700 req->rq_flags &= ~RQF_DONTPREP;
2701 if (q->unprep_rq_fn)
2702 q->unprep_rq_fn(q, req);
2703}
2704EXPORT_SYMBOL_GPL(blk_unprep_request);
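/*
 * Example (illustrative sketch, not part of the original file): a driver
 * that allocates per-command resources in its prep_rq_fn would typically
 * mark the request with RQF_DONTPREP and release those resources from the
 * matching unprep_rq_fn; example_cmd_alloc()/example_cmd_free() are
 * hypothetical helpers, and the two callbacks would be registered with
 * blk_queue_prep_rq() and blk_queue_unprep_rq().
 *
 *	static int example_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		if (!example_cmd_alloc(rq))
 *			return BLKPREP_DEFER;
 *
 *		rq->rq_flags |= RQF_DONTPREP;
 *		return BLKPREP_OK;
 *	}
 *
 *	static void example_unprep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		example_cmd_free(rq);
 *	}
 */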
2705
2706/*
2707 * queue lock must be held
2708 */
2709void blk_finish_request(struct request *req, int error)
2710{
2711 struct request_queue *q = req->q;
2712
2713 if (req->rq_flags & RQF_STATS)
2714 blk_stat_add(&q->rq_stats[rq_data_dir(req)], req);
2715
2716 if (req->rq_flags & RQF_QUEUED)
2717 blk_queue_end_tag(q, req);
2718
2719 BUG_ON(blk_queued_rq(req));
2720
2721 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2722 laptop_io_completion(&req->q->backing_dev_info);
2723
2724 blk_delete_timer(req);
2725
2726 if (req->rq_flags & RQF_DONTPREP)
2727 blk_unprep_request(req);
2728
2729 blk_account_io_done(req);
2730
2731 if (req->end_io) {
2732 wbt_done(req->q->rq_wb, &req->issue_stat);
2733 req->end_io(req, error);
2734 } else {
2735 if (blk_bidi_rq(req))
2736 __blk_put_request(req->next_rq->q, req->next_rq);
2737
2738 __blk_put_request(q, req);
2739 }
2740}
2741EXPORT_SYMBOL(blk_finish_request);
2742
2743/**
2744 * blk_end_bidi_request - Complete a bidi request
2745 * @rq: the request to complete
2746 * @error: %0 for success, < %0 for error
2747 * @nr_bytes: number of bytes to complete @rq
2748 * @bidi_bytes: number of bytes to complete @rq->next_rq
2749 *
2750 * Description:
2751 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq.
2752 * Drivers that support bidi can safely call this function for any
2753 * type of request, bidi or uni. In the latter case @bidi_bytes is
2754 * just ignored.
2755 *
2756 * Return:
2757 * %false - we are done with this request
2758 * %true - still buffers pending for this request
2759 **/
2760static bool blk_end_bidi_request(struct request *rq, int error,
2761 unsigned int nr_bytes, unsigned int bidi_bytes)
2762{
2763 struct request_queue *q = rq->q;
2764 unsigned long flags;
2765
2766 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2767 return true;
2768
2769 spin_lock_irqsave(q->queue_lock, flags);
2770 blk_finish_request(rq, error);
2771 spin_unlock_irqrestore(q->queue_lock, flags);
2772
2773 return false;
2774}
2775
2776/**
2777 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2778 * @rq: the request to complete
2779 * @error: %0 for success, < %0 for error
2780 * @nr_bytes: number of bytes to complete @rq
2781 * @bidi_bytes: number of bytes to complete @rq->next_rq
2782 *
2783 * Description:
2784 * Identical to blk_end_bidi_request() except that queue lock is
2785 * assumed to be locked on entry and remains so on return.
2786 *
2787 * Return:
2788 * %false - we are done with this request
2789 * %true - still buffers pending for this request
2790 **/
2791bool __blk_end_bidi_request(struct request *rq, int error,
2792 unsigned int nr_bytes, unsigned int bidi_bytes)
2793{
2794 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
2795 return true;
2796
2797 blk_finish_request(rq, error);
2798
2799 return false;
2800}
2801
2802/**
2803 * blk_end_request - Helper function for drivers to complete the request.
2804 * @rq: the request being processed
2805 * @error: %0 for success, < %0 for error
2806 * @nr_bytes: number of bytes to complete
2807 *
2808 * Description:
2809 * Ends I/O on a number of bytes attached to @rq.
2810 * If @rq has leftover data, it is set up for the next range of segments.
2811 *
2812 * Return:
2813 * %false - we are done with this request
2814 * %true - still buffers pending for this request
2815 **/
2816bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2817{
2818 return blk_end_bidi_request(rq, error, nr_bytes, 0);
2819}
2820EXPORT_SYMBOL(blk_end_request);
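/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical single-queue driver's completion path could account for a
 * partially finished command with blk_end_request() and reissue whatever
 * remains; example_dev and example_issue_next() are made-up names.
 *
 *	static void example_complete(struct example_dev *dev, struct request *rq,
 *				     unsigned int bytes_done, int error)
 *	{
 *		if (blk_end_request(rq, error, bytes_done))
 *			example_issue_next(dev, rq);
 *	}
 *
 * When blk_end_request() returns %false the request has been fully
 * completed and must not be touched again.
 */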
2821
2822/**
2823 * blk_end_request_all - Helper function for drivers to finish the request.
2824 * @rq: the request to finish
2825 * @error: %0 for success, < %0 for error
2826 *
2827 * Description:
2828 * Completely finish @rq.
2829 */
2830void blk_end_request_all(struct request *rq, int error)
2831{
2832 bool pending;
2833 unsigned int bidi_bytes = 0;
2834
2835 if (unlikely(blk_bidi_rq(rq)))
2836 bidi_bytes = blk_rq_bytes(rq->next_rq);
2837
2838 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2839 BUG_ON(pending);
2840}
2841EXPORT_SYMBOL(blk_end_request_all);
2842
2843/**
2844 * blk_end_request_cur - Helper function to finish the current request chunk.
2845 * @rq: the request to finish the current chunk for
2846 * @error: %0 for success, < %0 for error
2847 *
2848 * Description:
2849 * Complete the current consecutively mapped chunk from @rq.
2850 *
2851 * Return:
2852 * %false - we are done with this request
2853 * %true - still buffers pending for this request
2854 */
2855bool blk_end_request_cur(struct request *rq, int error)
2856{
2857 return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2858}
2859EXPORT_SYMBOL(blk_end_request_cur);
2860
2861/**
2862 * blk_end_request_err - Finish a request till the next failure boundary.
2863 * @rq: the request to finish till the next failure boundary for
2864 * @error: must be negative errno
2865 *
2866 * Description:
2867 * Complete @rq till the next failure boundary.
2868 *
2869 * Return:
2870 * %false - we are done with this request
2871 * %true - still buffers pending for this request
2872 */
2873bool blk_end_request_err(struct request *rq, int error)
2874{
2875 WARN_ON(error >= 0);
2876 return blk_end_request(rq, error, blk_rq_err_bytes(rq));
2877}
2878EXPORT_SYMBOL_GPL(blk_end_request_err);
2879
2880/**
2881 * __blk_end_request - Helper function for drivers to complete the request.
2882 * @rq: the request being processed
2883 * @error: %0 for success, < %0 for error
2884 * @nr_bytes: number of bytes to complete
2885 *
2886 * Description:
2887 * Must be called with queue lock held unlike blk_end_request().
2888 *
2889 * Return:
2890 * %false - we are done with this request
2891 * %true - still buffers pending for this request
2892 **/
2893bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
2894{
2895 return __blk_end_bidi_request(rq, error, nr_bytes, 0);
2896}
2897EXPORT_SYMBOL(__blk_end_request);
2898
2899/**
2900 * __blk_end_request_all - Helper function for drivers to finish the request.
2901 * @rq: the request to finish
2902 * @error: %0 for success, < %0 for error
2903 *
2904 * Description:
2905 * Completely finish @rq. Must be called with queue lock held.
2906 */
2907void __blk_end_request_all(struct request *rq, int error)
2908{
2909 bool pending;
2910 unsigned int bidi_bytes = 0;
2911
2912 if (unlikely(blk_bidi_rq(rq)))
2913 bidi_bytes = blk_rq_bytes(rq->next_rq);
2914
2915 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
2916 BUG_ON(pending);
2917}
2918EXPORT_SYMBOL(__blk_end_request_all);
2919
2920/**
2921 * __blk_end_request_cur - Helper function to finish the current request chunk.
2922 * @rq: the request to finish the current chunk for
2923 * @error: %0 for success, < %0 for error
2924 *
2925 * Description:
2926 * Complete the current consecutively mapped chunk from @rq. Must
2927 * be called with queue lock held.
2928 *
2929 * Return:
2930 * %false - we are done with this request
2931 * %true - still buffers pending for this request
2932 */
2933bool __blk_end_request_cur(struct request *rq, int error)
2934{
2935 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
2936}
2937EXPORT_SYMBOL(__blk_end_request_cur);
2938
2939/**
2940 * __blk_end_request_err - Finish a request till the next failure boundary.
2941 * @rq: the request to finish till the next failure boundary for
2942 * @error: must be negative errno
2943 *
2944 * Description:
2945 * Complete @rq till the next failure boundary. Must be called
2946 * with queue lock held.
2947 *
2948 * Return:
2949 * %false - we are done with this request
2950 * %true - still buffers pending for this request
2951 */
2952bool __blk_end_request_err(struct request *rq, int error)
2953{
2954 WARN_ON(error >= 0);
2955 return __blk_end_request(rq, error, blk_rq_err_bytes(rq));
2956}
2957EXPORT_SYMBOL_GPL(__blk_end_request_err);
2958
2959void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2960 struct bio *bio)
2961{
2962 if (bio_has_data(bio))
2963 rq->nr_phys_segments = bio_phys_segments(q, bio);
2964
2965 rq->__data_len = bio->bi_iter.bi_size;
2966 rq->bio = rq->biotail = bio;
2967
2968 if (bio->bi_bdev)
2969 rq->rq_disk = bio->bi_bdev->bd_disk;
2970}
2971
2972#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
2973/**
2974 * rq_flush_dcache_pages - Helper function to flush all pages in a request
2975 * @rq: the request to be flushed
2976 *
2977 * Description:
2978 * Flush all pages in @rq.
2979 */
2980void rq_flush_dcache_pages(struct request *rq)
2981{
2982 struct req_iterator iter;
2983 struct bio_vec bvec;
2984
2985 rq_for_each_segment(bvec, rq, iter)
2986 flush_dcache_page(bvec.bv_page);
2987}
2988EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2989#endif
2990
2991/**
2992 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
2993 * @q : the queue of the device being checked
2994 *
2995 * Description:
2996 * Check if underlying low-level drivers of a device are busy.
2997 * If the drivers want to export their busy state, they must set their
2998 * own exporting function using blk_queue_lld_busy() first.
2999 *
3000 * Basically, this function is used only by request stacking drivers
3001 * to stop dispatching requests to underlying devices when underlying
3002 * devices are busy. This behavior allows more I/O merging on the queue
3003 * of the request stacking driver and prevents I/O throughput regressions
3004 * under bursty I/O load.
3005 *
3006 * Return:
3007 * 0 - Not busy (The request stacking driver should dispatch request)
3008 * 1 - Busy (The request stacking driver should stop dispatching request)
3009 */
3010int blk_lld_busy(struct request_queue *q)
3011{
3012 if (q->lld_busy_fn)
3013 return q->lld_busy_fn(q);
3014
3015 return 0;
3016}
3017EXPORT_SYMBOL_GPL(blk_lld_busy);
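/*
 * Example (hedged sketch): a request stacking driver might consult the
 * lower device's queue before dispatching a clone and back off while the
 * low-level driver reports itself busy; example_requeue() and
 * example_map_and_dispatch() are hypothetical helpers.
 *
 *	static void example_dispatch(struct request *clone,
 *				     struct request_queue *lower_q)
 *	{
 *		if (blk_lld_busy(lower_q)) {
 *			example_requeue(clone);
 *			return;
 *		}
 *		example_map_and_dispatch(clone, lower_q);
 *	}
 */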
3018
3019/**
3020 * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
3021 * @rq: the clone request to be cleaned up
3022 *
3023 * Description:
3024 * Free all bios in @rq for a cloned request.
3025 */
3026void blk_rq_unprep_clone(struct request *rq)
3027{
3028 struct bio *bio;
3029
3030 while ((bio = rq->bio) != NULL) {
3031 rq->bio = bio->bi_next;
3032
3033 bio_put(bio);
3034 }
3035}
3036EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
3037
3038/*
3039 * Copy attributes of the original request to the clone request.
3040 * The actual data parts (e.g. ->cmd, ->sense) are not copied.
3041 */
3042static void __blk_rq_prep_clone(struct request *dst, struct request *src)
3043{
3044 dst->cpu = src->cpu;
3045 dst->cmd_flags = src->cmd_flags | REQ_NOMERGE;
3046 dst->cmd_type = src->cmd_type;
3047 dst->__sector = blk_rq_pos(src);
3048 dst->__data_len = blk_rq_bytes(src);
3049 dst->nr_phys_segments = src->nr_phys_segments;
3050 dst->ioprio = src->ioprio;
3051 dst->extra_len = src->extra_len;
3052}
3053
3054/**
3055 * blk_rq_prep_clone - Helper function to setup clone request
3056 * @rq: the request to be setup
3057 * @rq_src: original request to be cloned
3058 * @bs: bio_set that bios for clone are allocated from
3059 * @gfp_mask: memory allocation mask for bio
3060 * @bio_ctr: setup function to be called for each clone bio.
3061 * Returns %0 for success, non %0 for failure.
3062 * @data: private data to be passed to @bio_ctr
3063 *
3064 * Description:
3065 * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
3066 * The actual data parts of @rq_src (e.g. ->cmd, ->sense)
3067 * are not copied, and copying such parts is the caller's responsibility.
3068 * Also, the pages that the original bios point to are not copied
3069 * and the cloned bios just point to the same pages.
3070 * So cloned bios must be completed before original bios, which means
3071 * the caller must complete @rq before @rq_src.
3072 */
3073int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
3074 struct bio_set *bs, gfp_t gfp_mask,
3075 int (*bio_ctr)(struct bio *, struct bio *, void *),
3076 void *data)
3077{
3078 struct bio *bio, *bio_src;
3079
3080 if (!bs)
3081 bs = fs_bio_set;
3082
3083 __rq_for_each_bio(bio_src, rq_src) {
3084 bio = bio_clone_fast(bio_src, gfp_mask, bs);
3085 if (!bio)
3086 goto free_and_out;
3087
3088 if (bio_ctr && bio_ctr(bio, bio_src, data))
3089 goto free_and_out;
3090
3091 if (rq->bio) {
3092 rq->biotail->bi_next = bio;
3093 rq->biotail = bio;
3094 } else
3095 rq->bio = rq->biotail = bio;
3096 }
3097
3098 __blk_rq_prep_clone(rq, rq_src);
3099
3100 return 0;
3101
3102free_and_out:
3103 if (bio)
3104 bio_put(bio);
3105 blk_rq_unprep_clone(rq);
3106
3107 return -ENOMEM;
3108}
3109EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
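/*
 * Example (illustrative sketch): a stacking driver that has already
 * allocated a clone request could set it up and tear it down roughly as
 * follows; example_clone_endio() is a hypothetical rq_end_io_fn and the
 * clone allocation itself is not shown.
 *
 *	static int example_setup_clone(struct request *clone,
 *				       struct request *rq_src)
 *	{
 *		if (blk_rq_prep_clone(clone, rq_src, NULL, GFP_ATOMIC,
 *				      NULL, NULL))
 *			return -ENOMEM;
 *
 *		clone->end_io = example_clone_endio;
 *		clone->end_io_data = rq_src;
 *		return 0;
 *	}
 *
 * On the teardown path the clone's bios are released again with
 * blk_rq_unprep_clone(clone) before the clone itself is freed.
 */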
3110
3111int kblockd_schedule_work(struct work_struct *work)
3112{
3113 return queue_work(kblockd_workqueue, work);
3114}
3115EXPORT_SYMBOL(kblockd_schedule_work);
3116
3117int kblockd_schedule_work_on(int cpu, struct work_struct *work)
3118{
3119 return queue_work_on(cpu, kblockd_workqueue, work);
3120}
3121EXPORT_SYMBOL(kblockd_schedule_work_on);
3122
3123int kblockd_schedule_delayed_work(struct delayed_work *dwork,
3124 unsigned long delay)
3125{
3126 return queue_delayed_work(kblockd_workqueue, dwork, delay);
3127}
3128EXPORT_SYMBOL(kblockd_schedule_delayed_work);
3129
3130int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork,
3131 unsigned long delay)
3132{
3133 return queue_delayed_work_on(cpu, kblockd_workqueue, dwork, delay);
3134}
3135EXPORT_SYMBOL(kblockd_schedule_delayed_work_on);
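/*
 * Example (sketch only): callers typically use the kblockd helpers above to
 * push work out of an atomic context; example_ctx and example_process() are
 * hypothetical.
 *
 *	struct example_ctx {
 *		struct work_struct work;
 *	};
 *
 *	static void example_work_fn(struct work_struct *work)
 *	{
 *		struct example_ctx *ctx =
 *			container_of(work, struct example_ctx, work);
 *
 *		example_process(ctx);
 *	}
 *
 * After INIT_WORK(&ctx->work, example_work_fn), the work is deferred with
 * kblockd_schedule_work(&ctx->work) or, with a delay, with
 * kblockd_schedule_delayed_work().
 */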
3136
3137/**
3138 * blk_start_plug - initialize blk_plug and track it inside the task_struct
3139 * @plug: The &struct blk_plug that needs to be initialized
3140 *
3141 * Description:
3142 * Tracking blk_plug inside the task_struct will help with auto-flushing the
3143 * pending I/O should the task end up blocking between blk_start_plug() and
3144 * blk_finish_plug(). This is important from a performance perspective, but
3145 * also ensures that we don't deadlock. For instance, if the task is blocking
3146 * for a memory allocation, memory reclaim could end up wanting to free a
3147 * page belonging to that request that is currently residing in our private
3148 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
3149 * this kind of deadlock.
3150 */
3151void blk_start_plug(struct blk_plug *plug)
3152{
3153 struct task_struct *tsk = current;
3154
3155 /*
3156 * If this is a nested plug, don't actually assign it.
3157 */
3158 if (tsk->plug)
3159 return;
3160
3161 INIT_LIST_HEAD(&plug->list);
3162 INIT_LIST_HEAD(&plug->mq_list);
3163 INIT_LIST_HEAD(&plug->cb_list);
3164 /*
3165 * Store ordering should not be needed here, since a potential
3166 * preempt will imply a full memory barrier
3167 */
3168 tsk->plug = plug;
3169}
3170EXPORT_SYMBOL(blk_start_plug);
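/*
 * Example (sketch, not part of the original file): a submitter batching
 * several bios would typically bracket the submissions with a plug so they
 * can be merged and dispatched together; example_next_bio() is a
 * hypothetical iterator.
 *
 *	struct blk_plug plug;
 *	struct bio *bio;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = example_next_bio()) != NULL)
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */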
3171
3172static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
3173{
3174 struct request *rqa = container_of(a, struct request, queuelist);
3175 struct request *rqb = container_of(b, struct request, queuelist);
3176
3177 return !(rqa->q < rqb->q ||
3178 (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
3179}
3180
3181/*
3182 * If 'from_schedule' is true, then postpone the dispatch of requests
3183 * until a safe kblockd context. We do this to avoid accidental large
3184 * additional stack usage in driver dispatch, in places where the original
3185 * plugger did not intend it.
3186 */
3187static void queue_unplugged(struct request_queue *q, unsigned int depth,
3188 bool from_schedule)
3189 __releases(q->queue_lock)
3190{
3191 trace_block_unplug(q, depth, !from_schedule);
3192
3193 if (from_schedule)
3194 blk_run_queue_async(q);
3195 else
3196 __blk_run_queue(q);
3197 spin_unlock(q->queue_lock);
3198}
3199
3200static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
3201{
3202 LIST_HEAD(callbacks);
3203
3204 while (!list_empty(&plug->cb_list)) {
3205 list_splice_init(&plug->cb_list, &callbacks);
3206
3207 while (!list_empty(&callbacks)) {
3208 struct blk_plug_cb *cb = list_first_entry(&callbacks,
3209 struct blk_plug_cb,
3210 list);
3211 list_del(&cb->list);
3212 cb->callback(cb, from_schedule);
3213 }
3214 }
3215}
3216
3217struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
3218 int size)
3219{
3220 struct blk_plug *plug = current->plug;
3221 struct blk_plug_cb *cb;
3222
3223 if (!plug)
3224 return NULL;
3225
3226 list_for_each_entry(cb, &plug->cb_list, list)
3227 if (cb->callback == unplug && cb->data == data)
3228 return cb;
3229
3230 /* Not currently on the callback list */
3231 BUG_ON(size < sizeof(*cb));
3232 cb = kzalloc(size, GFP_ATOMIC);
3233 if (cb) {
3234 cb->data = data;
3235 cb->callback = unplug;
3236 list_add(&cb->list, &plug->cb_list);
3237 }
3238 return cb;
3239}
3240EXPORT_SYMBOL(blk_check_plugged);
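/*
 * Example (hedged sketch): a stacking driver can piggyback its own per-task
 * batching on the plug by embedding a struct blk_plug_cb in a larger
 * structure; the callback then runs when the plug is flushed. The
 * example_* names are made up.
 *
 *	struct example_plug_cb {
 *		struct blk_plug_cb cb;
 *		struct bio_list pending;
 *	};
 *
 *	static void example_unplug(struct blk_plug_cb *cb, bool from_schedule)
 *	{
 *		struct example_plug_cb *ecb =
 *			container_of(cb, struct example_plug_cb, cb);
 *		struct example_dev *dev = cb->data;
 *
 *		example_flush_pending(dev, &ecb->pending, from_schedule);
 *	}
 *
 * In the submission path the driver calls
 * blk_check_plugged(example_unplug, dev, sizeof(struct example_plug_cb))
 * and, if it gets a callback back, queues the bio on the embedded list
 * instead of submitting it immediately.
 */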
3241
3242void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
3243{
3244 struct request_queue *q;
3245 unsigned long flags;
3246 struct request *rq;
3247 LIST_HEAD(list);
3248 unsigned int depth;
3249
3250 flush_plug_callbacks(plug, from_schedule);
3251
3252 if (!list_empty(&plug->mq_list))
3253 blk_mq_flush_plug_list(plug, from_schedule);
3254
3255 if (list_empty(&plug->list))
3256 return;
3257
3258 list_splice_init(&plug->list, &list);
3259
3260 list_sort(NULL, &list, plug_rq_cmp);
3261
3262 q = NULL;
3263 depth = 0;
3264
3265 /*
3266 * Save and disable interrupts here, to avoid doing it for every
3267 * queue lock we have to take.
3268 */
3269 local_irq_save(flags);
3270 while (!list_empty(&list)) {
3271 rq = list_entry_rq(list.next);
3272 list_del_init(&rq->queuelist);
3273 BUG_ON(!rq->q);
3274 if (rq->q != q) {
3275 /*
3276 * This drops the queue lock
3277 */
3278 if (q)
3279 queue_unplugged(q, depth, from_schedule);
3280 q = rq->q;
3281 depth = 0;
3282 spin_lock(q->queue_lock);
3283 }
3284
3285 /*
3286 * Short-circuit if @q is dead
3287 */
3288 if (unlikely(blk_queue_dying(q))) {
3289 __blk_end_request_all(rq, -ENODEV);
3290 continue;
3291 }
3292
3293 /*
3294 * rq is already accounted, so use raw insert
3295 */
3296 if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
3297 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
3298 else
3299 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
3300
3301 depth++;
3302 }
3303
3304 /*
3305 * This drops the queue lock
3306 */
3307 if (q)
3308 queue_unplugged(q, depth, from_schedule);
3309
3310 local_irq_restore(flags);
3311}
3312
3313void blk_finish_plug(struct blk_plug *plug)
3314{
3315 if (plug != current->plug)
3316 return;
3317 blk_flush_plug_list(plug, false);
3318
3319 current->plug = NULL;
3320}
3321EXPORT_SYMBOL(blk_finish_plug);
3322
3323#ifdef CONFIG_PM
3324/**
3325 * blk_pm_runtime_init - Block layer runtime PM initialization routine
3326 * @q: the queue of the device
3327 * @dev: the device the queue belongs to
3328 *
3329 * Description:
3330 * Initialize runtime-PM-related fields for @q and start auto suspend for
3331 * @dev. Drivers that want to take advantage of request-based runtime PM
3332 * should call this function after @dev has been initialized, and its
3333 * request queue @q has been allocated, and runtime PM for it cannot happen
3334 * yet (either because it is disabled/forbidden or its usage_count is > 0).
3335 * In most cases, the driver should call this function before any I/O has taken place.
3336 *
3337 * This function takes care of setting up autosuspend for the device;
3338 * the autosuspend delay is set to -1 to make runtime suspend impossible
3339 * until an updated value is set by either the user or the driver. Drivers do
3340 * not need to touch other autosuspend settings.
3341 *
3342 * The block layer runtime PM is request based, so it only works for drivers
3343 * that use requests as their I/O unit instead of those that use bios directly.
3344 */
3345void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
3346{
3347 q->dev = dev;
3348 q->rpm_status = RPM_ACTIVE;
3349 pm_runtime_set_autosuspend_delay(q->dev, -1);
3350 pm_runtime_use_autosuspend(q->dev);
3351}
3352EXPORT_SYMBOL(blk_pm_runtime_init);
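/*
 * Example (sketch under the assumption of a request-based driver's probe
 * path): runtime PM is typically wired up once the queue exists, after which
 * the driver or user space picks a real autosuspend delay; the 5 second
 * value below is only illustrative.
 *
 *	blk_pm_runtime_init(q, dev);
 *	pm_runtime_set_autosuspend_delay(dev, 5000);
 *	pm_runtime_allow(dev);
 */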
3353
3354/**
3355 * blk_pre_runtime_suspend - Pre runtime suspend check
3356 * @q: the queue of the device
3357 *
3358 * Description:
3359 * This function will check if runtime suspend is allowed for the device
3360 * by examining if there are any requests pending in the queue. If there
3361 * are requests pending, the device can not be runtime suspended; otherwise,
3362 * the queue's status will be updated to SUSPENDING and the driver can
3363 * proceed to suspend the device.
3364 *
3365 * If suspend is not allowed, we mark the device as last busy so that the
3366 * runtime PM core will try to autosuspend it some time later.
3367 *
3368 * This function should be called near the start of the device's
3369 * runtime_suspend callback.
3370 *
3371 * Return:
3372 * 0 - OK to runtime suspend the device
3373 * -EBUSY - Device should not be runtime suspended
3374 */
3375int blk_pre_runtime_suspend(struct request_queue *q)
3376{
3377 int ret = 0;
3378
3379 if (!q->dev)
3380 return ret;
3381
3382 spin_lock_irq(q->queue_lock);
3383 if (q->nr_pending) {
3384 ret = -EBUSY;
3385 pm_runtime_mark_last_busy(q->dev);
3386 } else {
3387 q->rpm_status = RPM_SUSPENDING;
3388 }
3389 spin_unlock_irq(q->queue_lock);
3390 return ret;
3391}
3392EXPORT_SYMBOL(blk_pre_runtime_suspend);
3393
3394/**
3395 * blk_post_runtime_suspend - Post runtime suspend processing
3396 * @q: the queue of the device
3397 * @err: return value of the device's runtime_suspend function
3398 *
3399 * Description:
3400 * Update the queue's runtime status according to the return value of the
3401 * device's runtime suspend function and mark last busy for the device so
3402 * that PM core will try to auto suspend the device at a later time.
3403 *
3404 * This function should be called near the end of the device's
3405 * runtime_suspend callback.
3406 */
3407void blk_post_runtime_suspend(struct request_queue *q, int err)
3408{
3409 if (!q->dev)
3410 return;
3411
3412 spin_lock_irq(q->queue_lock);
3413 if (!err) {
3414 q->rpm_status = RPM_SUSPENDED;
3415 } else {
3416 q->rpm_status = RPM_ACTIVE;
3417 pm_runtime_mark_last_busy(q->dev);
3418 }
3419 spin_unlock_irq(q->queue_lock);
3420}
3421EXPORT_SYMBOL(blk_post_runtime_suspend);
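/*
 * Example (illustrative sketch): a driver's runtime_suspend callback would
 * normally bracket its device specific power-down with the two helpers
 * above; example_dev_to_queue() and example_power_down() are hypothetical.
 *
 *	static int example_runtime_suspend(struct device *dev)
 *	{
 *		struct request_queue *q = example_dev_to_queue(dev);
 *		int err;
 *
 *		err = blk_pre_runtime_suspend(q);
 *		if (err)
 *			return err;
 *
 *		err = example_power_down(dev);
 *		blk_post_runtime_suspend(q, err);
 *		return err;
 *	}
 */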
3422
3423/**
3424 * blk_pre_runtime_resume - Pre runtime resume processing
3425 * @q: the queue of the device
3426 *
3427 * Description:
3428 * Update the queue's runtime status to RESUMING in preparation for the
3429 * runtime resume of the device.
3430 *
3431 * This function should be called near the start of the device's
3432 * runtime_resume callback.
3433 */
3434void blk_pre_runtime_resume(struct request_queue *q)
3435{
3436 if (!q->dev)
3437 return;
3438
3439 spin_lock_irq(q->queue_lock);
3440 q->rpm_status = RPM_RESUMING;
3441 spin_unlock_irq(q->queue_lock);
3442}
3443EXPORT_SYMBOL(blk_pre_runtime_resume);
3444
3445/**
3446 * blk_post_runtime_resume - Post runtime resume processing
3447 * @q: the queue of the device
3448 * @err: return value of the device's runtime_resume function
3449 *
3450 * Description:
3451 * Update the queue's runtime status according to the return value of the
3452 * device's runtime_resume function. If it was successfully resumed, process
3453 * the requests that were queued up while the device was resuming, then
3454 * mark the device last busy and initiate autosuspend for it.
3455 *
3456 * This function should be called near the end of the device's
3457 * runtime_resume callback.
3458 */
3459void blk_post_runtime_resume(struct request_queue *q, int err)
3460{
3461 if (!q->dev)
3462 return;
3463
3464 spin_lock_irq(q->queue_lock);
3465 if (!err) {
3466 q->rpm_status = RPM_ACTIVE;
3467 __blk_run_queue(q);
3468 pm_runtime_mark_last_busy(q->dev);
3469 pm_request_autosuspend(q->dev);
3470 } else {
3471 q->rpm_status = RPM_SUSPENDED;
3472 }
3473 spin_unlock_irq(q->queue_lock);
3474}
3475EXPORT_SYMBOL(blk_post_runtime_resume);
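/*
 * Example (illustrative sketch, mirroring the suspend side): the
 * runtime_resume callback brackets the device specific power-up with the
 * two helpers above; example_power_up() is hypothetical.
 *
 *	static int example_runtime_resume(struct device *dev)
 *	{
 *		struct request_queue *q = example_dev_to_queue(dev);
 *		int err;
 *
 *		blk_pre_runtime_resume(q);
 *		err = example_power_up(dev);
 *		blk_post_runtime_resume(q, err);
 *		return err;
 *	}
 */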
3476
3477/**
3478 * blk_set_runtime_active - Force runtime status of the queue to be active
3479 * @q: the queue of the device
3480 *
3481 * If the device is left runtime suspended during system suspend, the resume
3482 * hook typically resumes the device and corrects its runtime status
3483 * accordingly. However, that does not affect the queue runtime PM status,
3484 * which is still "suspended". This prevents processing requests from the
3485 * queue.
3486 *
3487 * This function can be used in a driver's resume hook to correct the queue's
3488 * runtime PM status and re-enable peeking requests from the queue. It
3489 * should be called before the first request is added to the queue.
3490 */
3491void blk_set_runtime_active(struct request_queue *q)
3492{
3493 spin_lock_irq(q->queue_lock);
3494 q->rpm_status = RPM_ACTIVE;
3495 pm_runtime_mark_last_busy(q->dev);
3496 pm_request_autosuspend(q->dev);
3497 spin_unlock_irq(q->queue_lock);
3498}
3499EXPORT_SYMBOL(blk_set_runtime_active);
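/*
 * Example (sketch): a driver whose device was left runtime suspended across
 * a system sleep can fix up both the PM core state and the queue status from
 * its system resume hook, roughly like this (error handling omitted;
 * example_power_up() is hypothetical):
 *
 *	static int example_resume(struct device *dev)
 *	{
 *		struct request_queue *q = example_dev_to_queue(dev);
 *
 *		example_power_up(dev);
 *		pm_runtime_disable(dev);
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *		blk_set_runtime_active(q);
 *		return 0;
 *	}
 */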
3500#endif
3501
3502int __init blk_dev_init(void)
3503{
3504 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
3505 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3506 FIELD_SIZEOF(struct request, cmd_flags));
3507 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
3508 FIELD_SIZEOF(struct bio, bi_opf));
3509
3510 /* used for unplugging and affects IO latency/throughput - HIGHPRI */
3511 kblockd_workqueue = alloc_workqueue("kblockd",
3512 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
3513 if (!kblockd_workqueue)
3514 panic("Failed to create kblockd\n");
3515
3516 request_cachep = kmem_cache_create("blkdev_requests",
3517 sizeof(struct request), 0, SLAB_PANIC, NULL);
3518
3519 blk_requestq_cachep = kmem_cache_create("request_queue",
3520 sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
3521
3522 return 0;
3523}