block/blk-tag.c (v3.15)
 
/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
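
/*
 * Example (editor's sketch, not part of the kernel source): a driver's
 * completion interrupt handler might map a hardware-reported tag back
 * to its request like this.  The handler name and "hw_tag" are
 * hypothetical.
 */
static void example_irq_handler(struct request_queue *q, int hw_tag)
{
	struct request *rq = blk_queue_find_tag(q, hw_tag);

	if (!rq) {
		printk(KERN_WARNING "spurious completion, tag %d\n", hw_tag);
		return;
	}
	/* hand rq to the driver's completion path ... */
}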

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}

	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For externally managed @bqt frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave
 *	queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
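
/*
 * Example (editor's sketch): a controller driver that owns several
 * request queues can allocate one externally managed map here and
 * share it across those queues through blk_queue_init_tags(); once
 * every queue is gone, the map is dropped with blk_free_tags().  The
 * depth of 64 is illustrative.
 */
static struct blk_queue_tag *example_alloc_shared_map(void)
{
	return blk_init_tags(64);	/* NULL on allocation failure */
}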

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			return -ENOMEM;

	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
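
/*
 * Example (editor's sketch): turning tagged queuing on for one queue.
 * Passing a NULL @tags asks the function to allocate a private map;
 * passing the map from blk_init_tags() above would share it instead.
 * The depth of 32 is illustrative.
 */
static int example_enable_tcq(struct request_queue *q)
{
	return blk_queue_init_tags(q, 32, NULL);	/* 0 or -ENOMEM */
}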

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * if we already have large enough real_max_depth.  just
	 * adjust max_depth.  *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, tag
	 * map can not be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
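
/*
 * Example (editor's sketch): growing or shrinking the usable depth at
 * run time, e.g. after the device reports QUEUE FULL.  The queue lock
 * is taken here because the Notes above require it; "new_depth" is
 * whatever the driver settles on.
 */
static void example_set_depth(struct request_queue *q, int new_depth)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_resize_tags(q, new_depth))
		printk(KERN_WARNING "tag map resize failed\n");
	spin_unlock_irqrestore(q->queue_lock, flags);
}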

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
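
/*
 * Example (editor's sketch): the completion half of a tagged driver.
 * The tag is released with blk_queue_end_tag() before the request is
 * finished, which is the ordering the Description above insists on.
 * The caller is assumed to hold the queue lock.
 */
static void example_complete(struct request_queue *q, int hw_tag, int error)
{
	struct request *rq = blk_queue_find_tag(q, hw_tag);

	if (!rq)
		return;
	blk_queue_end_tag(q, rq);
	__blk_end_request_all(rq, error);
}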

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! if this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the drivers responsibility to readd
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		switch (max_depth) {
		case 2:
			max_depth = 1;
			break;
		case 3:
			max_depth = 2;
			break;
		default:
			max_depth -= 2;
		}
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
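
/*
 * Example (editor's sketch): a request_fn built around this helper.  A
 * zero return means the request was tagged, started and taken off the
 * queue; a non-zero return means no tag was free, so the driver backs
 * off and retries when a completion frees one.
 */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;	/* out of tags, try again later */
		/* issue rq to the hardware using rq->tag ... */
	}
}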

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   readd all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
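
/*
 * Example (editor's sketch): after a controller reset, everything that
 * was in flight is pushed back onto the request queue and the queue is
 * kicked to reissue it.  blk_run_queue() takes the lock itself, so it
 * is called only after the lock is dropped.
 */
static void example_after_reset(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_queue_invalidate_tags(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	blk_run_queue(q);
}
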
block/blk-tag.c (v4.17)
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Drop the reference count on @bqt and frees it when the last reference
 * is dropped.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (atomic_dec_and_test(&bqt->refcnt)) {
		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
							bqt->max_depth);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave
 *	queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						int depth, int alloc_policy)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	atomic_set(&tags->refcnt, 1);
	tags->alloc_policy = alloc_policy;
	tags->next_tag = 0;
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 * @alloc_policy: tag allocation policy
 **/
struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
	return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);
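
/*
 * Example (editor's sketch): between the two versions shown here the
 * external-map API grew an allocation policy argument.
 * BLK_TAG_ALLOC_FIFO always hands out the lowest free tag, while
 * BLK_TAG_ALLOC_RR scans round-robin starting at next_tag; both
 * constants come from <linux/blkdev.h>.  The depth of 64 is
 * illustrative.
 */
static struct blk_queue_tag *example_alloc_rr_map(void)
{
	return blk_init_tags(64, BLK_TAG_ALLOC_RR);
}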

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags, int alloc_policy)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth, alloc_policy);

		if (!tags)
			return -ENOMEM;

	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * if we already have large enough real_max_depth.  just
	 * adjust max_depth.  *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, tag
	 * map can not be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned tag = rq->tag; /* negative tags invalid */

	lockdep_assert_held(q->queue_lock);

	BUG_ON(tag >= bqt->real_max_depth);

	list_del_init(&rq->queuelist);
	rq->rq_flags &= ~RQF_QUEUED;
	rq->tag = -1;
	rq->internal_tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! if this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the drivers responsibility to readd
 *    it if it should need to be restarted for some reason.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	lockdep_assert_held(q->queue_lock);

	if (unlikely((rq->rq_flags & RQF_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		switch (max_depth) {
		case 2:
			max_depth = 1;
			break;
		case 3:
			max_depth = 2;
			break;
		default:
			max_depth -= 2;
		}
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	do {
		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
			tag = find_first_zero_bit(bqt->tag_map, max_depth);
			if (tag >= max_depth)
				return 1;
		} else {
			int start = bqt->next_tag;
			int size = min_t(int, bqt->max_depth, max_depth + start);
			tag = find_next_zero_bit(bqt->tag_map, size, start);
			if (tag >= size && start + size > bqt->max_depth) {
				size = start + size - bqt->max_depth;
				tag = find_first_zero_bit(bqt->tag_map, size);
			}
			if (tag >= size)
				return 1;
		}

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	bqt->next_tag = (tag + 1) % bqt->max_depth;
	rq->rq_flags |= RQF_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   readd all requests to the request queue in the right order.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);