Linux v3.15 (block/blk.h)
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32
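
(Editorial note, not part of the header: HZ is the number of timer ticks per second, so HZ/50UL is one fiftieth of a second expressed in jiffies, roughly 20 ms regardless of the configured HZ; at HZ=250 it is 5 jiffies, at HZ=1000 it is 20. A batching process therefore gets about a 20 ms window, capped at 32 requests.)
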

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *request_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout,
			  unsigned int *next_set);
void __blk_add_timer(struct request *req, struct list_head *timeout_list);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);


bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
			     struct bio *bio);
bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
			    struct bio *bio);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
			    unsigned int *request_count);

void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
	REQ_ATOM_STARTED,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
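
These two helpers implement a one-shot claim: test_and_set_bit() returns the previous bit value, so the path that observes 0 owns completion of the request and the other path backs off. Below is a standalone user-space sketch of the same race between an IO-completion path and an error-handling timer, written with C11 atomics and pthreads rather than the kernel primitives; every name in it is illustrative.

/* claim_sketch.c: build with  cc claim_sketch.c -pthread  */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag rq_complete = ATOMIC_FLAG_INIT;

/*
 * Analogue of the idea behind blk_mark_rq_complete(): the atomic
 * test-and-set returns the old value, so only the first caller sees
 * "previously clear" and wins (the kernel helper returns the old bit).
 */
static int claim_request(void)
{
	return atomic_flag_test_and_set(&rq_complete) == 0;
}

static void *finish(void *name)
{
	if (claim_request())
		printf("%s: grabbed the request, completing it\n", (char *)name);
	else
		printf("%s: already grabbed elsewhere, backing off\n", (char *)name);
	return NULL;
}

int main(void)
{
	pthread_t io, eh;

	pthread_create(&io, NULL, finish, "IO completion");
	pthread_create(&eh, NULL, finish, "EH timer");
	pthread_join(io, NULL);
	pthread_join(eh, NULL);
	return 0;
}

However the scheduling falls out, exactly one thread reports that it grabbed the request; the other always prints the back-off message.
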

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is in flight but the drive cannot queue
		 * further flushes, so hold the queue until the flush request
		 * finishes.  Even if we did not hold it, the driver could not
		 * dispatch the next requests anyway and would have to requeue
		 * them, and holding can even improve throughput.  For example,
		 * given the requests flush1, write1, flush2: flush1 is
		 * dispatched, the queue is held, and write1 is not inserted.
		 * Once flush1 finishes, flush2 is dispatched.  Since the disk
		 * cache is already clean, flush2 completes almost immediately,
		 * so flush2 is effectively folded into flush1.
		 * While the queue is held, a flag is set to indicate that it
		 * should be restarted later.  See flush_end_io() for details.
		 */
		if (q->flush_pending_idx != q->flush_running_idx &&
				!queue_flush_queueable(q)) {
			q->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_bypass(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
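
The hold decision in the comment above reduces to two conditions: a flush sequence is in progress (flush_pending_idx differs from flush_running_idx) and the drive cannot accept another queued flush. The following standalone toy model only mirrors that decision; the struct and helper are invented for illustration and are not the kernel's request_queue.

/* flush_hold_sketch.c: a toy model of the hold-the-queue decision */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	unsigned int flush_pending_idx;   /* flush batch with pending work */
	unsigned int flush_running_idx;   /* flush batch currently running */
	bool         flush_queueable;     /* can the drive queue another flush? */
	bool         flush_queue_delayed; /* set when dispatch is held back */
};

/* Returns true when dispatch should be held until the running flush ends. */
static bool hold_for_flush(struct toy_queue *q)
{
	if (q->flush_pending_idx != q->flush_running_idx && !q->flush_queueable) {
		q->flush_queue_delayed = true;   /* remember to restart later */
		return true;
	}
	return false;
}

int main(void)
{
	struct toy_queue q = {
		.flush_pending_idx = 1,
		.flush_running_idx = 0,
		.flush_queueable = false,
	};

	printf("hold queue: %s\n", hold_for_flush(&q) ? "yes" : "no");
	printf("restart flag set: %s\n", q.flush_queue_delayed ? "yes" : "no");
	return 0;
}
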

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}
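
Both wrappers follow the same idiom: a hook in the elevator ops table may be left NULL, so the wrapper tests the pointer before calling through it and call sites never care whether the scheduler provides the hook. Here is a self-contained sketch of that optional-callback pattern, with invented names rather than the kernel's elevator types.

/* optional_hook_sketch.c */
#include <stdio.h>

struct sched_ops {
	void (*activate)(int rq);     /* optional hook, may be NULL */
	void (*deactivate)(int rq);   /* optional hook, may be NULL */
};

static void demo_activate(int rq)
{
	printf("activating request %d\n", rq);
}

/* Only the hooks this scheduler cares about are filled in. */
static const struct sched_ops ops = {
	.activate = demo_activate,
	/* .deactivate intentionally left NULL */
};

static void activate_rq(const struct sched_ops *o, int rq)
{
	if (o->activate)          /* same shape as the checks above */
		o->activate(rq);
}

static void deactivate_rq(const struct sched_ops *o, int rq)
{
	if (o->deactivate)
		o->deactivate(rq);
}

int main(void)
{
	activate_rq(&ops, 42);
	deactivate_rq(&ops, 42);  /* silently skipped: hook not provided */
	return 0;
}
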

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

void __blk_run_queue_uncond(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
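
The hysteresis mentioned in the comment comes from keeping two thresholds: congestion is declared only above nr_congestion_on and cleared only below the lower nr_congestion_off, so a queue hovering near one value does not flap between states. The standalone sketch below illustrates the effect; the 7/8 and 13/16 fractions and all names are made up for the example rather than taken from this header.

/* hysteresis_sketch.c */
#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
	unsigned int nr_requests;
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	bool congested;
};

static void set_congestion_thresholds(struct toy_queue *q)
{
	q->nr_congestion_on  = q->nr_requests * 7 / 8;    /* 112 for 128 */
	q->nr_congestion_off = q->nr_requests * 13 / 16;  /* 104 for 128 */
}

static void account(struct toy_queue *q, unsigned int in_flight)
{
	if (!q->congested && in_flight >= q->nr_congestion_on)
		q->congested = true;
	else if (q->congested && in_flight < q->nr_congestion_off)
		q->congested = false;
	printf("in flight %3u -> %s\n", in_flight,
	       q->congested ? "congested" : "not congested");
}

int main(void)
{
	struct toy_queue q = { .nr_requests = 128 };

	set_congestion_thresholds(&q);
	account(&q, 110);   /* below 112: stays uncongested      */
	account(&q, 115);   /* crosses the on threshold          */
	account(&q, 108);   /* still above off (104): stays on   */
	account(&q, 100);   /* drops below off: clears           */
	return 0;
}
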

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS);
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context, which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
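
The kerneldoc above states the contract precisely: the io_context is created lazily on first use, yet callers must still cope with a NULL return because the allocation may fail. A minimal standalone sketch of that create-on-first-use shape, using stand-in names and types rather than the kernel's io_context:

/* lazy_ioc_sketch.c */
#include <stdio.h>
#include <stdlib.h>

struct toy_io_context { int nr_batch_requests; };

static struct toy_io_context *current_ioc;  /* stands in for current->io_context */

static struct toy_io_context *get_or_create_ioc(void)
{
	if (!current_ioc)
		current_ioc = calloc(1, sizeof(*current_ioc)); /* may fail */
	return current_ioc;  /* may still be NULL if allocation failed */
}

int main(void)
{
	struct toy_io_context *ioc = get_or_create_ioc();

	if (!ioc) {
		fprintf(stderr, "no io_context; caller must fall back or retry\n");
		return 1;
	}
	printf("io_context ready at %p\n", (void *)ioc);
	free(ioc);
	return 0;
}
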

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
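
This is the usual Kconfig stub idiom: when CONFIG_BLK_DEV_THROTTLING is disabled, static inline no-ops with identical signatures keep every call site compiling and let the compiler discard the calls, with no #ifdef needed in the callers. A compilable standalone sketch of the idiom follows, with a made-up MYCONFIG_THROTTLING macro standing in for a real Kconfig symbol.

/* config_stub_sketch.c */
#include <stdbool.h>
#include <stdio.h>

/* #define MYCONFIG_THROTTLING 1 */   /* uncomment to "enable" the feature */

#ifdef MYCONFIG_THROTTLING
static bool throttle_bio(int bio)
{
	printf("throttling bio %d\n", bio);
	return true;
}
#else
/* Feature compiled out: stub keeps the same signature and a harmless result. */
static inline bool throttle_bio(int bio)
{
	(void)bio;
	return false;
}
#endif

int main(void)
{
	/* The caller is identical either way; no #ifdef needed here. */
	if (!throttle_bio(7))
		printf("bio 7 dispatched without throttling\n");
	return 0;
}
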

#endif /* BLK_INTERNAL_H */
Linux v3.5.6 (block/blk.h)
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * A flush request is in flight but the drive cannot queue
		 * further flushes, so hold the queue until the flush request
		 * finishes.  Even if we did not hold it, the driver could not
		 * dispatch the next requests anyway and would have to requeue
		 * them, and holding can even improve throughput.  For example,
		 * given the requests flush1, write1, flush2: flush1 is
		 * dispatched, the queue is held, and write1 is not inserted.
		 * Once flush1 finishes, flush2 is dispatched.  Since the disk
		 * cache is already clean, flush2 completes almost immediately,
		 * so flush2 is effectively folded into flush1.
		 * While the queue is held, a flag is set to indicate that it
		 * should be restarted later.  See flush_end_io() for details.
		 */
		if (q->flush_pending_idx != q->flush_running_idx &&
				!queue_flush_queueable(q)) {
			q->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_dead(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);


/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS ||
	        (rq->cmd_flags & REQ_DISCARD));
}

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it.  Returns the current %current->io_context, which may be %NULL if
 * allocation failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */

#endif /* BLK_INTERNAL_H */