v6.2 (block/blk-mq-sched.h):
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "elevator.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)

bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
				   struct list_head *free);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_rqs(struct request_queue *q);
/* Re-run @hctx if dispatch previously marked it for a restart. */
static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		__blk_mq_sched_restart(hctx);
}

static inline bool bio_mergeable(struct bio *bio)
{
	return !(bio->bi_opf & REQ_NOMERGE_FLAGS);
}

/*
 * Give the elevator a chance to veto merging @bio into @rq; requests
 * not owned by an elevator (no RQF_ELV) are never vetoed here.
 */
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = q->elevator;

		if (e->type->ops.allow_merge)
			return e->type->ops.allow_merge(q, rq, bio);
	}
	return true;
}

/* Tell the elevator that @rq completed at time @now. */
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = rq->q->elevator;

		if (e->type->ops.completed_request)
			e->type->ops.completed_request(rq, now);
	}
}

/* Tell the elevator that @rq is being requeued. */
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ELV) {
		struct request_queue *q = rq->q;
		struct elevator_queue *e = q->elevator;

		if ((rq->rq_flags & RQF_ELVPRIV) && e->type->ops.requeue_request)
			e->type->ops.requeue_request(rq);
	}
}

/* Ask the elevator whether it has requests ready to dispatch on @hctx. */
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.has_work)
		return e->type->ops.has_work(hctx);

	return false;
}

static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif
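
For context, the sketch below shows how an I/O scheduler could plug into the elevator_mq_ops hooks that the v6.2 inline helpers above dispatch to. It is a minimal, hedged illustration: the "demo_" identifiers are hypothetical, not kernel API, and a real scheduler would also have to implement ops such as init_sched, insert_requests and dispatch_request, omitted here for brevity. Only the ops members (allow_merge, has_work) and elv_register()/elv_unregister() come from the real interface this header exercises.

/*
 * Hypothetical no-op scheduler (assumes it is built inside block/,
 * like the helpers above). Only allow_merge and has_work are wired
 * up, to show which callbacks blk_mq_sched_allow_merge() and
 * blk_mq_sched_has_work() reach.
 */
#include <linux/module.h>
#include <linux/blk-mq.h>
#include "elevator.h"

static bool demo_allow_merge(struct request_queue *q, struct request *rq,
			     struct bio *bio)
{
	/* Invoked via blk_mq_sched_allow_merge() once RQF_ELV is set on
	 * @rq; returning false vetoes merging @bio into @rq. */
	return true;
}

static bool demo_has_work(struct blk_mq_hw_ctx *hctx)
{
	/* Invoked via blk_mq_sched_has_work(); a real scheduler reports
	 * whether it holds dispatchable requests for @hctx. */
	return false;
}

static struct elevator_type demo_sched = {
	.ops = {
		.allow_merge	= demo_allow_merge,
		.has_work	= demo_has_work,
	},
	.elevator_name	= "demo",
	.elevator_owner	= THIS_MODULE,
};

static int __init demo_sched_init(void)
{
	return elv_register(&demo_sched);
}

static void __exit demo_sched_exit(void)
{
	elv_unregister(&demo_sched);
}

module_init(demo_sched_init);
module_exit(demo_sched_exit);
MODULE_LICENSE("GPL");
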
v5.9 (block/blk-mq-sched.h):
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H

#include "blk-mq.h"
#include "blk-mq-tag.h"

void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *));

void blk_mq_sched_assign_ioc(struct request *rq);

void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);

void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async);
void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async);

void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
void blk_mq_sched_free_requests(struct request_queue *q);
/* Skip the elevator merge attempt entirely when the queue disallows merges. */
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return false;

	return __blk_mq_sched_bio_merge(q, bio, nr_segs);
}

/*
 * Give the elevator a chance to veto merging @bio into @rq; with no
 * elevator attached the merge is never vetoed here.
 */
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
			 struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e && e->type->ops.allow_merge)
		return e->type->ops.allow_merge(q, rq, bio);

	return true;
}

/* Tell the elevator that @rq completed at time @now. */
static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
	struct elevator_queue *e = rq->q->elevator;

	if (e && e->type->ops.completed_request)
		e->type->ops.completed_request(rq, now);
}

/* Tell the elevator that @rq is being requeued. */
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
		e->type->ops.requeue_request(rq);
}

/* Ask the elevator whether it has requests ready to dispatch on @hctx. */
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct elevator_queue *e = hctx->queue->elevator;

	if (e && e->type->ops.has_work)
		return e->type->ops.has_work(hctx);

	return false;
}

static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}

#endif
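
The v5.9 helpers differ from v6.2 mainly in how they detect an attached scheduler: NULL checks on q->elevator ("e && ...") stand in for the later RQF_ELV flag test, blk_mq_sched_bio_merge() is still an inline wrapper around __blk_mq_sched_bio_merge(), and blk_mq_sched_restart() is an out-of-line call rather than an inline test of BLK_MQ_S_SCHED_RESTART. The hedged sketch below, written against the v5.9 declarations above with hypothetical "demo_" names, illustrates the restart handshake these helpers support: a dispatch side that cannot make progress marks the hctx, and a later completion kicks it.

/*
 * Hypothetical callers showing the mark-restart/restart pairing
 * (v5.9 API; assumes it is built inside block/).
 */
#include <linux/blk-mq.h>
#include "blk-mq-sched.h"

static void demo_try_dispatch(struct blk_mq_hw_ctx *hctx)
{
	if (!blk_mq_sched_has_work(hctx))
		return;

	/* Suppose no driver tag or budget is available right now:
	 * record that this hctx must be re-run when resources free up. */
	blk_mq_sched_mark_restart_hctx(hctx);
}

static void demo_complete(struct request *rq, u64 now)
{
	struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

	/* Let the elevator account for the completion... */
	blk_mq_sched_completed_request(rq, now);

	/* ...then re-run the hw queue if a restart was marked. */
	if (blk_mq_sched_needs_restart(hctx))
		blk_mq_sched_restart(hctx);
}
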