v6.2
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef BLK_INTERNAL_H
  3#define BLK_INTERNAL_H
  4
  5#include <linux/blk-crypto.h>
  6#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
  7#include <xen/xen.h>
  8#include "blk-crypto-internal.h"
  9
 10struct elevator_type;
 11
 12/* Max future timer expiry for timeouts */
 13#define BLK_MAX_TIMEOUT		(5 * HZ)
 14
 15extern struct dentry *blk_debugfs_root;
 16
 17struct blk_flush_queue {
 18	unsigned int		flush_pending_idx:1;
 19	unsigned int		flush_running_idx:1;
 20	blk_status_t 		rq_status;
 21	unsigned long		flush_pending_since;
 22	struct list_head	flush_queue[2];
 23	struct list_head	flush_data_in_flight;
 24	struct request		*flush_rq;
 25
 26	spinlock_t		mq_flush_lock;
 27};
 28
 29bool is_flush_rq(struct request *req);
 30
 31struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
 32					      gfp_t flags);
 33void blk_free_flush_queue(struct blk_flush_queue *q);
 34
 35void blk_freeze_queue(struct request_queue *q);
 36void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
 37void blk_queue_start_drain(struct request_queue *q);
 38int __bio_queue_enter(struct request_queue *q, struct bio *bio);
 39void submit_bio_noacct_nocheck(struct bio *bio);
 40
 41static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
 42{
 43	rcu_read_lock();
 44	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
 45		goto fail;
 46
 47	/*
 48	 * The code that increments the pm_only counter must ensure that the
 49	 * counter is globally visible before the queue is unfrozen.
 50	 */
 51	if (blk_queue_pm_only(q) &&
 52	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
 53		goto fail_put;
 54
 55	rcu_read_unlock();
 56	return true;
 57
 58fail_put:
 59	blk_queue_exit(q);
 60fail:
 61	rcu_read_unlock();
 62	return false;
 63}
 64
 65static inline int bio_queue_enter(struct bio *bio)
 66{
 67	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 68
 69	if (blk_try_enter_queue(q, false))
 70		return 0;
 71	return __bio_queue_enter(q, bio);
 72}
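/*
 * Annotation (not part of blk.h): sketch of the calling discipline assumed
 * by bio_queue_enter().  A zero return means a q_usage_counter reference is
 * held and must eventually be dropped with blk_queue_exit(); a nonzero
 * return means the queue could not be entered.  The helper name below is
 * hypothetical.
 */
static inline void example_submit_with_queue_ref(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (bio_queue_enter(bio))
		return;		/* could not get a queue reference */
	/* ... hand the bio to the driver while the reference is held ... */
	blk_queue_exit(q);
}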
 73
 74#define BIO_INLINE_VECS 4
 75struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
 76		gfp_t gfp_mask);
 77void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);
 78
 79static inline bool biovec_phys_mergeable(struct request_queue *q,
 80		struct bio_vec *vec1, struct bio_vec *vec2)
 81{
 82	unsigned long mask = queue_segment_boundary(q);
 83	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
 84	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
 85
 86	/*
 87	 * Merging adjacent physical pages may not work correctly under KMSAN
 88	 * if their metadata pages aren't adjacent. Just disable merging.
 89	 */
 90	if (IS_ENABLED(CONFIG_KMSAN))
 91		return false;
 92
 93	if (addr1 + vec1->bv_len != addr2)
 94		return false;
 95	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
 96		return false;
 97	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
 98		return false;
 99	return true;
100}
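/*
 * Annotation (not part of blk.h), worked example of the checks above: with a
 * segment boundary mask of 0xffff (64K), a vec1 at physical address 0x1fff0
 * with bv_len 0x10 is contiguous with a vec2 at 0x20000 with bv_len 0x1000,
 * but (0x1fff0 | 0xffff) == 0x1ffff while (0x20fff | 0xffff) == 0x2ffff, so
 * the combined segment would straddle the 64K boundary and the merge is
 * rejected even though the pages are physically adjacent.
 */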
101
102static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
103		struct bio_vec *bprv, unsigned int offset)
104{
105	return (offset & lim->virt_boundary_mask) ||
106		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
107}
108
109/*
110 * Check if adding a bio_vec after bprv with offset would create a gap in
111 * the SG list. Most drivers don't care about this, but some do.
112 */
113static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
114		struct bio_vec *bprv, unsigned int offset)
115{
116	if (!lim->virt_boundary_mask)
117		return false;
118	return __bvec_gap_to_prev(lim, bprv, offset);
119}
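/*
 * Annotation (not part of blk.h), worked example: with a virt_boundary_mask
 * of 0xfff (4K), a previous vector with bv_offset 0x200 and bv_len 0x400
 * ends at offset 0x600, so (0x200 + 0x400) & 0xfff != 0 and any follow-on
 * vector is reported as a gap; merging without a gap requires the previous
 * vector to end exactly on the boundary and the next one to start on it
 * (offset & 0xfff == 0).
 */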
120
121static inline bool rq_mergeable(struct request *rq)
122{
123	if (blk_rq_is_passthrough(rq))
124		return false;
125
126	if (req_op(rq) == REQ_OP_FLUSH)
127		return false;
128
129	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
130		return false;
131
132	if (req_op(rq) == REQ_OP_ZONE_APPEND)
133		return false;
134
135	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
136		return false;
137	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
138		return false;
139
140	return true;
141}
142
143/*
144 * There are two different ways to handle DISCARD merges:
145 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 146 *     sends the bios to the controller together. The ranges don't need to be
147 *     contiguous.
 148 *  2) Otherwise, the request is handled as a normal read/write request.  The ranges
149 *     need to be contiguous.
150 */
151static inline bool blk_discard_mergable(struct request *req)
152{
153	if (req_op(req) == REQ_OP_DISCARD &&
154	    queue_max_discard_segments(req->q) > 1)
155		return true;
156	return false;
157}
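/*
 * Annotation (not part of blk.h): blk_try_merge() in blk-merge.c checks
 * blk_discard_mergable() before the positional checks, roughly as sketched
 * below; ELEVATOR_DISCARD_MERGE selects the range-based path (case 1 above),
 * everything else falls back to the contiguous rules (case 2).
 */
static inline enum elv_merge example_discard_merge_kind(struct request *req)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;	/* ranges, no contiguity needed */
	return ELEVATOR_NO_MERGE;		/* defer to the contiguous checks */
}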
158
159static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
160						     enum req_op op)
161{
162	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
163		return min(q->limits.max_discard_sectors,
164			   UINT_MAX >> SECTOR_SHIFT);
165
166	if (unlikely(op == REQ_OP_WRITE_ZEROES))
167		return q->limits.max_write_zeroes_sectors;
168
169	return q->limits.max_sectors;
170}
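/*
 * Annotation (not part of blk.h): UINT_MAX >> SECTOR_SHIFT is 0x7fffff
 * sectors, i.e. just under 4 GiB in 512-byte units, so the discard/secure
 * erase limit is clamped to keep the request size in bytes representable in
 * an unsigned int even if max_discard_sectors is configured larger.
 */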
171
172#ifdef CONFIG_BLK_DEV_INTEGRITY
173void blk_flush_integrity(void);
174bool __bio_integrity_endio(struct bio *);
175void bio_integrity_free(struct bio *bio);
176static inline bool bio_integrity_endio(struct bio *bio)
177{
178	if (bio_integrity(bio))
179		return __bio_integrity_endio(bio);
180	return true;
181}
182
183bool blk_integrity_merge_rq(struct request_queue *, struct request *,
184		struct request *);
185bool blk_integrity_merge_bio(struct request_queue *, struct request *,
186		struct bio *);
187
188static inline bool integrity_req_gap_back_merge(struct request *req,
189		struct bio *next)
190{
191	struct bio_integrity_payload *bip = bio_integrity(req->bio);
192	struct bio_integrity_payload *bip_next = bio_integrity(next);
193
194	return bvec_gap_to_prev(&req->q->limits,
195				&bip->bip_vec[bip->bip_vcnt - 1],
196				bip_next->bip_vec[0].bv_offset);
197}
198
199static inline bool integrity_req_gap_front_merge(struct request *req,
200		struct bio *bio)
201{
202	struct bio_integrity_payload *bip = bio_integrity(bio);
203	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
204
205	return bvec_gap_to_prev(&req->q->limits,
206				&bip->bip_vec[bip->bip_vcnt - 1],
207				bip_next->bip_vec[0].bv_offset);
208}
209
210int blk_integrity_add(struct gendisk *disk);
211void blk_integrity_del(struct gendisk *);
212#else /* CONFIG_BLK_DEV_INTEGRITY */
213static inline bool blk_integrity_merge_rq(struct request_queue *rq,
214		struct request *r1, struct request *r2)
215{
216	return true;
217}
218static inline bool blk_integrity_merge_bio(struct request_queue *rq,
219		struct request *r, struct bio *b)
220{
221	return true;
222}
223static inline bool integrity_req_gap_back_merge(struct request *req,
224		struct bio *next)
225{
226	return false;
227}
228static inline bool integrity_req_gap_front_merge(struct request *req,
229		struct bio *bio)
230{
231	return false;
232}
233
234static inline void blk_flush_integrity(void)
235{
236}
237static inline bool bio_integrity_endio(struct bio *bio)
238{
239	return true;
240}
241static inline void bio_integrity_free(struct bio *bio)
242{
243}
244static inline int blk_integrity_add(struct gendisk *disk)
245{
246	return 0;
247}
248static inline void blk_integrity_del(struct gendisk *disk)
249{
250}
251#endif /* CONFIG_BLK_DEV_INTEGRITY */
252
253unsigned long blk_rq_timeout(unsigned long timeout);
254void blk_add_timer(struct request *req);
255const char *blk_status_to_str(blk_status_t status);
256
257bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
258		unsigned int nr_segs);
259bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
260			struct bio *bio, unsigned int nr_segs);
261
262/*
263 * Plug flush limits
264 */
265#define BLK_MAX_REQUEST_COUNT	32
266#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)
267
268/*
269 * Internal elevator interface
270 */
271#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
272
273void blk_insert_flush(struct request *rq);
274
275int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
276void elevator_disable(struct request_queue *q);
277void elevator_exit(struct request_queue *q);
278int elv_register_queue(struct request_queue *q, bool uevent);
279void elv_unregister_queue(struct request_queue *q);
280
281ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
282		char *buf);
283ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
284		char *buf);
285ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
286		char *buf);
287ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
288		char *buf);
289ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
290		const char *buf, size_t count);
291ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
292ssize_t part_timeout_store(struct device *, struct device_attribute *,
293				const char *, size_t);
294
295static inline bool bio_may_exceed_limits(struct bio *bio,
296					 const struct queue_limits *lim)
297{
298	switch (bio_op(bio)) {
299	case REQ_OP_DISCARD:
300	case REQ_OP_SECURE_ERASE:
301	case REQ_OP_WRITE_ZEROES:
302		return true; /* non-trivial splitting decisions */
303	default:
304		break;
305	}
306
307	/*
 308	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
309	 * This is a quick and dirty check that relies on the fact that
310	 * bi_io_vec[0] is always valid if a bio has data.  The check might
311	 * lead to occasional false negatives when bios are cloned, but compared
312	 * to the performance impact of cloned bios themselves the loop below
313	 * doesn't matter anyway.
314	 */
315	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
316		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
317}
318
319struct bio *__bio_split_to_limits(struct bio *bio,
320				  const struct queue_limits *lim,
321				  unsigned int *nr_segs);
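/*
 * Annotation (not part of blk.h): sketch of how a submission path combines
 * bio_may_exceed_limits() with __bio_split_to_limits(); the real call site
 * lives in the blk-mq submission code, this only shows the shape.
 */
static inline struct bio *example_split_if_needed(struct bio *bio,
		const struct queue_limits *lim, unsigned int *nr_segs)
{
	if (bio_may_exceed_limits(bio, lim))
		return __bio_split_to_limits(bio, lim, nr_segs);
	*nr_segs = 1;		/* single-segment fast path */
	return bio;
}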
322int ll_back_merge_fn(struct request *req, struct bio *bio,
323		unsigned int nr_segs);
324bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
325				struct request *next);
326unsigned int blk_recalc_rq_segments(struct request *rq);
327void blk_rq_set_mixed_merge(struct request *rq);
328bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
329enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
330
331void blk_set_default_limits(struct queue_limits *lim);
332int blk_dev_init(void);
333
334/*
335 * Contribute to IO statistics IFF:
336 *
337 *	a) it's attached to a gendisk, and
338 *	b) the queue had IO stats enabled when this request was started
339 */
340static inline bool blk_do_io_stat(struct request *rq)
341{
342	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
343}
344
345void update_io_ticks(struct block_device *part, unsigned long now, bool end);
346
347static inline void req_set_nomerge(struct request_queue *q, struct request *req)
348{
349	req->cmd_flags |= REQ_NOMERGE;
350	if (req == q->last_merge)
351		q->last_merge = NULL;
352}
353
354/*
355 * Internal io_context interface
356 */
357struct io_cq *ioc_find_get_icq(struct request_queue *q);
358struct io_cq *ioc_lookup_icq(struct request_queue *q);
359#ifdef CONFIG_BLK_ICQ
360void ioc_clear_queue(struct request_queue *q);
361#else
362static inline void ioc_clear_queue(struct request_queue *q)
363{
364}
365#endif /* CONFIG_BLK_ICQ */
366
367#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
368extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
369extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
370	const char *page, size_t count);
371extern void blk_throtl_bio_endio(struct bio *bio);
372extern void blk_throtl_stat_add(struct request *rq, u64 time);
373#else
374static inline void blk_throtl_bio_endio(struct bio *bio) { }
375static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
376#endif
377
378struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);
379
380static inline bool blk_queue_may_bounce(struct request_queue *q)
381{
382	return IS_ENABLED(CONFIG_BOUNCE) &&
383		q->limits.bounce == BLK_BOUNCE_HIGH &&
384		max_low_pfn >= max_pfn;
385}
386
387static inline struct bio *blk_queue_bounce(struct bio *bio,
388		struct request_queue *q)
389{
390	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
391		return __blk_queue_bounce(bio, q);
392	return bio;
393}
394
395#ifdef CONFIG_BLK_CGROUP_IOLATENCY
396int blk_iolatency_init(struct gendisk *disk);
397#else
398static inline int blk_iolatency_init(struct gendisk *disk) { return 0; };
399#endif
400
401#ifdef CONFIG_BLK_DEV_ZONED
402void disk_free_zone_bitmaps(struct gendisk *disk);
403void disk_clear_zone_settings(struct gendisk *disk);
404#else
405static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
406static inline void disk_clear_zone_settings(struct gendisk *disk) {}
407#endif
408
409int blk_alloc_ext_minor(void);
410void blk_free_ext_minor(unsigned int minor);
411#define ADDPART_FLAG_NONE	0
412#define ADDPART_FLAG_RAID	1
413#define ADDPART_FLAG_WHOLEDISK	2
414int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
415		sector_t length);
416int bdev_del_partition(struct gendisk *disk, int partno);
417int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
418		sector_t length);
419void blk_drop_partitions(struct gendisk *disk);
420
421struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
422		struct lock_class_key *lkclass);
423
424int bio_add_hw_page(struct request_queue *q, struct bio *bio,
425		struct page *page, unsigned int len, unsigned int offset,
426		unsigned int max_sectors, bool *same_page);
427
428struct request_queue *blk_alloc_queue(int node_id);
429
430int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner);
431
432int disk_alloc_events(struct gendisk *disk);
433void disk_add_events(struct gendisk *disk);
434void disk_del_events(struct gendisk *disk);
435void disk_release_events(struct gendisk *disk);
436void disk_block_events(struct gendisk *disk);
437void disk_unblock_events(struct gendisk *disk);
438void disk_flush_events(struct gendisk *disk, unsigned int mask);
439extern struct device_attribute dev_attr_events;
440extern struct device_attribute dev_attr_events_async;
441extern struct device_attribute dev_attr_events_poll_msecs;
442
443extern struct attribute_group blk_trace_attr_group;
444
445long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
446long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
447
448extern const struct address_space_operations def_blk_aops;
449
450int disk_register_independent_access_ranges(struct gendisk *disk);
451void disk_unregister_independent_access_ranges(struct gendisk *disk);
452
453#ifdef CONFIG_FAIL_MAKE_REQUEST
454bool should_fail_request(struct block_device *part, unsigned int bytes);
455#else /* CONFIG_FAIL_MAKE_REQUEST */
456static inline bool should_fail_request(struct block_device *part,
457					unsigned int bytes)
458{
459	return false;
460}
461#endif /* CONFIG_FAIL_MAKE_REQUEST */
462
463/*
464 * Optimized request reference counting. Ideally we'd make timeouts be more
465 * clever, as that's the only reason we need references at all... But until
466 * this happens, this is faster than using refcount_t. Also see:
467 *
468 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
469 */
470#define req_ref_zero_or_close_to_overflow(req)	\
471	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
472
473static inline bool req_ref_inc_not_zero(struct request *req)
474{
475	return atomic_inc_not_zero(&req->ref);
476}
477
478static inline bool req_ref_put_and_test(struct request *req)
479{
480	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
481	return atomic_dec_and_test(&req->ref);
482}
483
484static inline void req_ref_set(struct request *req, int value)
485{
486	atomic_set(&req->ref, value);
487}
488
489static inline int req_ref_read(struct request *req)
490{
491	return atomic_read(&req->ref);
492}
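/*
 * Annotation (not part of blk.h): expected pairing of the reference helpers
 * above.  A lookup that can race with completion only proceeds if
 * req_ref_inc_not_zero() succeeds, and whoever sees req_ref_put_and_test()
 * return true owns the final reference; the free path named below is only
 * assumed for the sketch.
 */
static inline void example_put_request(struct request *req)
{
	if (req_ref_put_and_test(req))
		__blk_mq_free_request(req);	/* assumed final-free path */
}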
493
494#endif /* BLK_INTERNAL_H */
v4.10.11
 
  1#ifndef BLK_INTERNAL_H
  2#define BLK_INTERNAL_H
  3
  4#include <linux/idr.h>
  5#include <linux/blk-mq.h>
  6#include "blk-mq.h"
  7
  8/* Amount of time in which a process may batch requests */
  9#define BLK_BATCH_TIME	(HZ/50UL)
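/*
 * Annotation (not part of blk.h): HZ jiffies make up one second, so HZ/50UL
 * is 20 ms worth of jiffies regardless of the configured HZ value.
 */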
 10
 11/* Number of requests a "batching" process may submit */
 12#define BLK_BATCH_REQ	32
 13
 14/* Max future timer expiry for timeouts */
 15#define BLK_MAX_TIMEOUT		(5 * HZ)
 16
 17struct blk_flush_queue {
 18	unsigned int		flush_queue_delayed:1;
 19	unsigned int		flush_pending_idx:1;
 20	unsigned int		flush_running_idx:1;
 21	unsigned long		flush_pending_since;
 22	struct list_head	flush_queue[2];
 23	struct list_head	flush_data_in_flight;
 24	struct request		*flush_rq;
 25
 26	/*
  27	 * flush_rq shares its tag with this rq; both can't be active
 28	 * at the same time
 29	 */
 30	struct request		*orig_rq;
 31	spinlock_t		mq_flush_lock;
 32};
 33
 34extern struct kmem_cache *blk_requestq_cachep;
 35extern struct kmem_cache *request_cachep;
 36extern struct kobj_type blk_queue_ktype;
 37extern struct ida blk_queue_ida;
 38
 39static inline struct blk_flush_queue *blk_get_flush_queue(
 40		struct request_queue *q, struct blk_mq_ctx *ctx)
 41{
 42	if (q->mq_ops)
 43		return blk_mq_map_queue(q, ctx->cpu)->fq;
 44	return q->fq;
 45}
 46
 47static inline void __blk_get_queue(struct request_queue *q)
 48{
 49	kobject_get(&q->kobj);
 50}
 51
 52struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 53		int node, int cmd_size);
 54void blk_free_flush_queue(struct blk_flush_queue *q);
 55
 56int blk_init_rl(struct request_list *rl, struct request_queue *q,
 57		gfp_t gfp_mask);
 58void blk_exit_rl(struct request_list *rl);
 59void init_request_from_bio(struct request *req, struct bio *bio);
 60void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 61			struct bio *bio);
 62void blk_queue_bypass_start(struct request_queue *q);
 63void blk_queue_bypass_end(struct request_queue *q);
 64void blk_dequeue_request(struct request *rq);
 65void __blk_queue_free_tags(struct request_queue *q);
 66bool __blk_end_bidi_request(struct request *rq, int error,
 67			    unsigned int nr_bytes, unsigned int bidi_bytes);
 68void blk_freeze_queue(struct request_queue *q);
 69
 70static inline void blk_queue_enter_live(struct request_queue *q)
 71{
 72	/*
 73	 * Given that running in generic_make_request() context
 74	 * guarantees that a live reference against q_usage_counter has
 75	 * been established, further references under that same context
 76	 * need not check that the queue has been frozen (marked dead).
 77	 */
 78	percpu_ref_get(&q->q_usage_counter);
 79}
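/*
 * Annotation (not part of blk.h): blk_queue_enter_live() takes an extra
 * q_usage_counter reference without checking whether the queue is frozen,
 * which is only safe when the caller is already inside a section that holds
 * a live reference (as the comment above describes); the extra reference is
 * still dropped with blk_queue_exit().
 */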
 80
 81#ifdef CONFIG_BLK_DEV_INTEGRITY
 82void blk_flush_integrity(void);
 83#else
 84static inline void blk_flush_integrity(void)
 85{
 86}
 87#endif
 88
 89void blk_timeout_work(struct work_struct *work);
 90unsigned long blk_rq_timeout(unsigned long timeout);
 91void blk_add_timer(struct request *req);
 92void blk_delete_timer(struct request *);
 93
 94
 95bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 96			     struct bio *bio);
 97bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
 98			    struct bio *bio);
 99bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
100			    unsigned int *request_count,
101			    struct request **same_queue_rq);
102unsigned int blk_plug_queued_count(struct request_queue *q);
103
104void blk_account_io_start(struct request *req, bool new_io);
105void blk_account_io_completion(struct request *req, unsigned int bytes);
106void blk_account_io_done(struct request *req);
107
108/*
109 * Internal atomic flags for request handling
110 */
111enum rq_atomic_flags {
112	REQ_ATOM_COMPLETE = 0,
113	REQ_ATOM_STARTED,
114	REQ_ATOM_POLL_SLEPT,
115};
116
117/*
 118 * EH timer and IO completion will both attempt to 'grab' the request; make
119 * sure that only one of them succeeds
120 */
121static inline int blk_mark_rq_complete(struct request *rq)
122{
123	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
124}
125
126static inline void blk_clear_rq_complete(struct request *rq)
127{
128	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
129}
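/*
 * Annotation (not part of blk.h): both the normal completion path and the
 * timeout handler call blk_mark_rq_complete(); only the caller that wins the
 * test_and_set_bit() race goes on to complete the request, roughly as
 * sketched below.
 */
static inline bool example_try_own_completion(struct request *rq)
{
	if (blk_mark_rq_complete(rq))
		return false;	/* the other path already owns completion */
	/* ... safe to complete the request here ... */
	return true;
}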
130
131/*
132 * Internal elevator interface
133 */
134#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)
135
136void blk_insert_flush(struct request *rq);
137
138static inline struct request *__elv_next_request(struct request_queue *q)
139{
140	struct request *rq;
141	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
142
143	while (1) {
144		if (!list_empty(&q->queue_head)) {
145			rq = list_entry_rq(q->queue_head.next);
146			return rq;
147		}
148
 149		 * A flush request is running and flush requests aren't
 150		 * queueable in the drive, so we can hold the queue until the
 151		 * flush request is finished. Even if we don't do this, the
 152		 * driver can't dispatch the next requests and will requeue
 153		 * them, and holding the queue can improve throughput too. For
 154		 * example, take requests flush1, write1, flush2: flush1 is
 155		 * dispatched, then the queue is held and write1 isn't inserted
 156		 * into the queue. After flush1 is finished, flush2 will be
 157		 * dispatched. Since the disk cache is already clean, flush2
 158		 * will finish very soon, so it looks as if flush2 was folded
 159		 * into flush1.
 160		 * Since the queue is held, a flag is set to indicate that the
 161		 * queue should be restarted later. Please see flush_end_io()
 162		 * for details.
163		 */
164		if (fq->flush_pending_idx != fq->flush_running_idx &&
165				!queue_flush_queueable(q)) {
166			fq->flush_queue_delayed = 1;
167			return NULL;
168		}
169		if (unlikely(blk_queue_bypass(q)) ||
170		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
171			return NULL;
172	}
173}
174
175static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
176{
177	struct elevator_queue *e = q->elevator;
178
179	if (e->type->ops.elevator_activate_req_fn)
180		e->type->ops.elevator_activate_req_fn(q, rq);
181}
182
183static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
184{
185	struct elevator_queue *e = q->elevator;
186
187	if (e->type->ops.elevator_deactivate_req_fn)
188		e->type->ops.elevator_deactivate_req_fn(q, rq);
189}
190
191#ifdef CONFIG_FAIL_IO_TIMEOUT
192int blk_should_fake_timeout(struct request_queue *);
193ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
194ssize_t part_timeout_store(struct device *, struct device_attribute *,
195				const char *, size_t);
196#else
197static inline int blk_should_fake_timeout(struct request_queue *q)
198{
199	return 0;
200}
201#endif
202
203int ll_back_merge_fn(struct request_queue *q, struct request *req,
204		     struct bio *bio);
205int ll_front_merge_fn(struct request_queue *q, struct request *req, 
206		      struct bio *bio);
207int attempt_back_merge(struct request_queue *q, struct request *rq);
208int attempt_front_merge(struct request_queue *q, struct request *rq);
209int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
210				struct request *next);
211void blk_recalc_rq_segments(struct request *rq);
212void blk_rq_set_mixed_merge(struct request *rq);
213bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
214int blk_try_merge(struct request *rq, struct bio *bio);
215
216void blk_queue_congestion_threshold(struct request_queue *q);
217
218int blk_dev_init(void);
219
220
221/*
222 * Return the threshold (number of used requests) at which the queue is
 223 * considered to be congested.  It includes a little hysteresis to keep the
224 * context switch rate down.
225 */
226static inline int queue_congestion_on_threshold(struct request_queue *q)
227{
228	return q->nr_congestion_on;
229}
230
231/*
232 * The threshold at which a queue is considered to be uncongested
233 */
234static inline int queue_congestion_off_threshold(struct request_queue *q)
235{
236	return q->nr_congestion_off;
237}
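/*
 * Annotation (not part of blk.h): blk_queue_congestion_threshold() in
 * blk-core.c sets nr_congestion_on above nr_congestion_off, so a queue that
 * crosses the "on" threshold must drain back below the lower "off" threshold
 * before it is reported uncongested again; that gap is the hysteresis the
 * comment above refers to and keeps the congested state from flapping.
 */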
238
239extern int blk_update_nr_requests(struct request_queue *, unsigned int);
240
241/*
242 * Contribute to IO statistics IFF:
243 *
244 *	a) it's attached to a gendisk, and
245 *	b) the queue had IO stats enabled when this request was started, and
246 *	c) it's a file system request
247 */
248static inline int blk_do_io_stat(struct request *rq)
249{
250	return rq->rq_disk &&
251	       (rq->rq_flags & RQF_IO_STAT) &&
252		(rq->cmd_type == REQ_TYPE_FS);
253}
254
255/*
256 * Internal io_context interface
257 */
258void get_io_context(struct io_context *ioc);
259struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
260struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
261			     gfp_t gfp_mask);
262void ioc_clear_queue(struct request_queue *q);
263
264int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
265
266/**
267 * create_io_context - try to create task->io_context
268 * @gfp_mask: allocation mask
269 * @node: allocation node
270 *
271 * If %current->io_context is %NULL, allocate a new io_context and install
272 * it.  Returns the current %current->io_context which may be %NULL if
273 * allocation failed.
274 *
275 * Note that this function can't be called with IRQ disabled because
276 * task_lock which protects %current->io_context is IRQ-unsafe.
277 */
278static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
279{
280	WARN_ON_ONCE(irqs_disabled());
281	if (unlikely(!current->io_context))
282		create_task_io_context(current, gfp_mask, node);
283	return current->io_context;
284}
285
286/*
287 * Internal throttling interface
288 */
289#ifdef CONFIG_BLK_DEV_THROTTLING
290extern void blk_throtl_drain(struct request_queue *q);
291extern int blk_throtl_init(struct request_queue *q);
292extern void blk_throtl_exit(struct request_queue *q);
293#else /* CONFIG_BLK_DEV_THROTTLING */
294static inline void blk_throtl_drain(struct request_queue *q) { }
295static inline int blk_throtl_init(struct request_queue *q) { return 0; }
296static inline void blk_throtl_exit(struct request_queue *q) { }
297#endif /* CONFIG_BLK_DEV_THROTTLING */
298
299#endif /* BLK_INTERNAL_H */