#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>
#include <linux/blk-mq.h>
#include "blk-mq.h"

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	/*
	 * flush_rq shares its tag with this rq; the two can't be active
	 * at the same time.
	 */
	struct request		*orig_rq;
	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline struct blk_flush_queue *blk_get_flush_queue(
		struct request_queue *q, struct blk_mq_ctx *ctx)
{
	struct blk_mq_hw_ctx *hctx;

	if (!q->mq_ops)
		return q->fq;

	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	return hctx->fq;
}

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
		int node, int cmd_size);
void blk_free_flush_queue(struct blk_flush_queue *q);

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);
void blk_freeze_queue(struct request_queue *q);

static inline void blk_queue_enter_live(struct request_queue *q)
{
	/*
	 * Given that running in generic_make_request() context
	 * guarantees that a live reference against q_usage_counter has
	 * been established, further references under that same context
	 * need not check that the queue has been frozen (marked dead).
	 */
	percpu_ref_get(&q->q_usage_counter);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
#else
static inline void blk_flush_integrity(void)
{
}
#endif

void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
void blk_delete_timer(struct request *);

bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count,
			    struct request **same_queue_rq);
unsigned int blk_plug_queued_count(struct request_queue *q);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the
 * request; make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
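
/*
 * Illustrative sketch (not verbatim kernel code): a completion path
 * and the timeout handler race to grab the request via the atomic
 * test-and-set above, and only the winner acts on it, e.g.:
 *
 *	if (!blk_mark_rq_complete(rq))
 *		__blk_complete_request(rq);
 *
 * The loser backs off, because the other side now owns completion.
 */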

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)

void blk_insert_flush(struct request *rq);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;
	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * If a flush request is running and the drive can't queue
		 * further flushes, hold the queue until the flush request
		 * finishes. Even if we didn't hold it, the driver couldn't
		 * dispatch the next requests and would have to requeue
		 * them; holding can also improve throughput. For example,
		 * given requests flush1, write1, flush2: once flush1 is
		 * dispatched the queue is held, so write1 isn't inserted.
		 * After flush1 finishes, flush2 is dispatched. Since the
		 * disk cache is already clean, flush2 finishes very
		 * quickly, so flush2 is effectively folded into flush1.
		 * Because the queue is held, a flag is set to indicate
		 * that the queue should be restarted later. See
		 * flush_end_io() for details.
		 */
		if (fq->flush_pending_idx != fq->flush_running_idx &&
				!queue_flush_queueable(q)) {
			fq->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested. It includes a little hysteresis to keep
 * the context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
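
/*
 * Illustrative sketch of the hysteresis (not verbatim caller code; the
 * real checks live in the request allocation and free paths):
 * congestion is flagged at the higher "on" threshold and only cleared
 * again below the lower "off" threshold, so the state can't flap
 * around a single boundary value:
 *
 *	if (rl->count[sync] >= queue_congestion_on_threshold(q))
 *		... mark the queue congested ...
 *	else if (rl->count[sync] < queue_congestion_off_threshold(q))
 *		... clear the congested state ...
 */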

extern int blk_update_nr_requests(struct request_queue *, unsigned int);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS);
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it. Returns the current %current->io_context, which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
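
/*
 * Illustrative call site (a sketch, not verbatim kernel code): a
 * request allocation slow path would typically ensure an io_context
 * exists before consulting per-task IO state, tolerating failure:
 *
 *	struct io_context *ioc = create_io_context(GFP_NOIO, q->node);
 *
 *	if (!ioc)
 *		... continue without per-task IO state ...
 */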

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/bio-integrity.h>
#include <linux/blk-crypto.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/sched/sysctl.h>
#include <linux/timekeeping.h>
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
bool blk_queue_start_drain(struct request_queue *q);
bool __blk_freeze_queue_start(struct request_queue *q,
			      struct task_struct *owner);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);
void bio_await_chain(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false)) {
		rwsem_acquire_read(&q->io_lockdep_map, 0, 0, _RET_IP_);
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
		return 0;
	}
	return __bio_queue_enter(q, bio);
}
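
/*
 * Illustrative pairing (a sketch, not verbatim kernel code): a
 * submission path enters the queue before using it and exits once the
 * bio has been handed off, which is what lets queue freezing drain all
 * users:
 *
 *	if (bio_queue_enter(bio))
 *		return;			(queue is frozen or dying)
 *	... process the bio ...
 *	blk_queue_exit(q);
 */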

static inline void blk_wait_io(struct completion *done)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		while (!wait_for_completion_io_timeout(done, timeout))
			;
	else
		wait_for_completion_io(done);
}

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = bvec_phys(vec1);
	phys_addr_t addr2 = bvec_phys(vec2);

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range
 *     and sends the bios to the controller together. The ranges don't need
 *     to be contiguous.
 *  2) Otherwise, the bios are handled as normal read/write requests, so the
 *     ranges need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request *rq)
{
	struct request_queue *q = rq->q;
	enum req_op op = req_op(rq);

	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	if (rq->cmd_flags & REQ_ATOMIC)
		return q->limits.atomic_write_max_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
void bio_integrity_free(struct bio *bio);

/*
 * Integrity payloads can either be owned by the submitter, in which case
 * bio_uninit will free them, or owned and generated by the block layer,
 * in which case we'll verify them here (for reads) and free them before
 * the bio is handed back to the submitter.
 */
bool __bio_integrity_endio(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip && (bip->bip_flags & BIP_BLOCK_INTEGRITY))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};

enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limits
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
		unsigned *nsegs);
struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim, unsigned *nsegs);
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *nr_segs);
struct bio *bio_split_zone_append(struct bio *bio,
		const struct queue_limits *lim, unsigned *nr_segs);

/*
 * All drivers must accept single-segment bios that are smaller than
 * PAGE_SIZE.
 *
 * This is a quick and dirty check that relies on the fact that bi_io_vec[0]
 * is always valid if a bio has data. The check might lead to occasional
 * false positives when bios are cloned, but compared to the performance
 * impact of cloned bios themselves the loop below doesn't matter anyway.
 */
static inline bool bio_may_need_split(struct bio *bio,
		const struct queue_limits *lim)
{
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
static inline struct bio *__bio_split_to_limits(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	switch (bio_op(bio)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		if (bio_may_need_split(bio, lim))
			return bio_split_rw(bio, lim, nr_segs);
		*nr_segs = 1;
		return bio;
	case REQ_OP_ZONE_APPEND:
		return bio_split_zone_append(bio, lim, nr_segs);
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return bio_split_discard(bio, lim, nr_segs);
	case REQ_OP_WRITE_ZEROES:
		return bio_split_write_zeroes(bio, lim, nr_segs);
	default:
		/* other operations can't be split */
		*nr_segs = 0;
		return bio;
	}
}

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_set_default_limits(struct queue_limits *lim);
void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim);
int blk_dev_init(void);

void update_io_ticks(struct block_device *part, unsigned long now, bool end);
unsigned int part_in_flight(struct block_device *part);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		(q->limits.features & BLK_FEAT_BOUNCE_HIGH) &&
		max_low_pfn < max_pfn;	/* bouncing only matters if highmem pages exist */
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_DEV_ZONED
void disk_init_zone_resources(struct gendisk *disk);
void disk_free_zone_resources(struct gendisk *disk);
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING);
}
void blk_zone_write_plug_bio_merged(struct bio *bio);
void blk_zone_write_plug_init_request(struct request *rq);
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
	/*
	 * For zone append requests, the request sector indicates the location
	 * at which the BIO data was written. Return this value to the BIO
	 * issuer through the BIO iter sector.
	 * For plugged zone writes, which include emulated zone append, we need
	 * the original BIO sector so that blk_zone_write_plug_bio_endio() can
	 * look up the zone write plug.
	 */
	if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))
		bio->bi_iter.bi_sector = rq->__sector;
}
void blk_zone_write_plug_bio_endio(struct bio *bio);
static inline void blk_zone_bio_endio(struct bio *bio)
{
	/*
	 * For write BIOs to zoned devices, signal the completion of the BIO so
	 * that the next write BIO can be submitted by zone write plugging.
	 */
	if (bio_zone_write_plugging(bio))
		blk_zone_write_plug_bio_endio(bio);
}

void blk_zone_write_plug_finish_request(struct request *rq);
static inline void blk_zone_finish_request(struct request *rq)
{
	if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING)
		blk_zone_write_plug_finish_request(rq);
}
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_init_zone_resources(struct gendisk *disk)
{
}
static inline void disk_free_zone_resources(struct gendisk *disk)
{
}
static inline bool bio_zone_write_plugging(struct bio *bio)
{
	return false;
}
static inline void blk_zone_write_plug_bio_merged(struct bio *bio)
{
}
static inline void blk_zone_write_plug_init_request(struct request *rq)
{
}
static inline void blk_zone_update_request_bio(struct request *rq,
					       struct bio *bio)
{
}
static inline void blk_zone_bio_endio(struct bio *bio)
{
}
static inline void blk_zone_finish_request(struct request *rq)
{
}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);
void bdev_unhash(struct block_device *bdev);
void bdev_drop(struct block_device *bdev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
#define ADDPART_FLAG_READONLY	4
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

int bio_add_hw_folio(struct request_queue *q, struct bio *bio,
		struct folio *folio, size_t len, size_t offset,
		unsigned int max_sectors, bool *same_page);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it, or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}
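
/*
 * Illustrative sketch (not verbatim kernel code): the timeout handler
 * may only look at a request it managed to take a reference on, and
 * whichever side drops the last reference frees the request:
 *
 *	if (req_ref_inc_not_zero(rq)) {
 *		... inspect rq safely ...
 *		if (req_ref_put_and_test(rq))
 *			... free the request ...
 *	}
 */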

static inline u64 blk_time_get_ns(void)
{
	struct blk_plug *plug = current->plug;

	if (!plug || !in_task())
		return ktime_get_ns();

	/*
	 * 0 could very well be a valid time, but rather than flag "this is
	 * a valid timestamp" separately, just accept that we'll do an extra
	 * ktime_get_ns() if we just happen to get 0 as the current time.
	 */
	if (!plug->cur_ktime) {
		plug->cur_ktime = ktime_get_ns();
		current->flags |= PF_BLOCK_TS;
	}
	return plug->cur_ktime;
}

static inline ktime_t blk_time_get(void)
{
	return ns_to_ktime(blk_time_get_ns());
}
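
/*
 * Usage note (illustrative): while a plug is held in task context,
 * repeated calls such as
 *
 *	rq->start_time_ns = blk_time_get_ns();
 *
 * all return the cached plug->cur_ktime instead of paying for a fresh
 * ktime_get_ns() per request; PF_BLOCK_TS marks the task so the cached
 * timestamp can be invalidated when the plug is flushed.
 */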

/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS      1
#define BIO_ISSUE_SIZE_BITS     12
#define BIO_ISSUE_RES_SHIFT     (64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT    (BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK     ((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK     \
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK      (~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(blk_time_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
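
/*
 * Worked example (illustrative): the 64-bit value packs, from the most
 * significant bit down, 1 reserved bit, 12 size bits and 51 time bits.
 * bio_issue_init() truncates @size to its low 12 bits, so for
 * size == 0x123 issued at time t:
 *
 *	issue->value == (old value & BIO_ISSUE_RES_MASK)
 *			| (t & BIO_ISSUE_TIME_MASK)
 *			| (0x123ULL << BIO_ISSUE_SIZE_SHIFT)
 *
 *	bio_issue_size(issue) == 0x123
 *	bio_issue_time(issue) == t & BIO_ISSUE_TIME_MASK
 */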

void bdev_release(struct file *bdev_file);
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file);
int bdev_permission(dev_t dev, blk_mode_t mode, void *holder);

void blk_integrity_generate(struct bio *bio);
void blk_integrity_verify(struct bio *bio);
void blk_integrity_prepare(struct request *rq);
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes);

static inline void blk_freeze_acquire_lock(struct request_queue *q,
		bool disk_dead, bool queue_dying)
{
	if (!disk_dead)
		rwsem_acquire(&q->io_lockdep_map, 0, 1, _RET_IP_);
	if (!queue_dying)
		rwsem_acquire(&q->q_lockdep_map, 0, 1, _RET_IP_);
}

static inline void blk_unfreeze_release_lock(struct request_queue *q,
		bool disk_dead, bool queue_dying)
{
	if (!queue_dying)
		rwsem_release(&q->q_lockdep_map, _RET_IP_);
	if (!disk_dead)
		rwsem_release(&q->io_lockdep_map, _RET_IP_);
}

#endif /* BLK_INTERNAL_H */