v3.15
  1/*
  2 * Functions related to tagged command queuing
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/bio.h>
  7#include <linux/blkdev.h>
  8#include <linux/slab.h>
  9
 10#include "blk.h"
 11
 12/**
 13 * blk_queue_find_tag - find a request by its tag and queue
 14 * @q:	 The request queue for the device
 15 * @tag: The tag of the request
 16 *
 17 * Notes:
 18 *    Should be used when a device returns a tag and you want to match
 19 *    it with a request.
 20 *
 21 *    no locks need be held.
 22 **/
 23struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 24{
 25	return blk_map_queue_find_tag(q->queue_tags, tag);
 26}
 27EXPORT_SYMBOL(blk_queue_find_tag);
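/*
 * Illustrative sketch (not part of the original file): how a driver's
 * completion path might use blk_queue_find_tag() to map a hardware-
 * reported tag back to its request.  example_complete_by_tag() and the
 * surrounding assumptions are hypothetical.
 */
static void example_complete_by_tag(struct request_queue *q, int hw_tag)
{
	struct request *rq = blk_queue_find_tag(q, hw_tag);

	if (unlikely(!rq)) {
		printk(KERN_ERR "example: no request pending for tag %d\n",
		       hw_tag);
		return;
	}
	/* ... complete rq as usual, e.g. blk_queue_end_tag() + end I/O ... */
}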
 28
 29/**
 30 * __blk_free_tags - release a given set of tag maintenance info
 31 * @bqt:	the tag map to free
 32 *
 33 * Tries to free the specified @bqt.  Returns true if it was
 34 * actually freed and false if there are still references using it
 35 */
 36static int __blk_free_tags(struct blk_queue_tag *bqt)
 37{
 38	int retval;
 39
 40	retval = atomic_dec_and_test(&bqt->refcnt);
 41	if (retval) {
 42		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
 43							bqt->max_depth);
 44
 45		kfree(bqt->tag_index);
 46		bqt->tag_index = NULL;
 47
 48		kfree(bqt->tag_map);
 49		bqt->tag_map = NULL;
 50
 51		kfree(bqt);
 52	}
 53
 54	return retval;
 55}
 56
 57/**
 58 * __blk_queue_free_tags - release tag maintenance info
 59 * @q:  the request queue for the device
 60 *
 61 *  Notes:
 62 *    blk_cleanup_queue() will take care of calling this function, if tagging
 63 *    has been used. So there's no need to call this directly.
 64 **/
 65void __blk_queue_free_tags(struct request_queue *q)
 66{
 67	struct blk_queue_tag *bqt = q->queue_tags;
 68
 69	if (!bqt)
 70		return;
 71
 72	__blk_free_tags(bqt);
 73
 74	q->queue_tags = NULL;
 75	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 76}
 77
 78/**
 79 * blk_free_tags - release a given set of tag maintenance info
 80 * @bqt:	the tag map to free
 81 *
 82 * For externally managed @bqt frees the map.  Callers of this
 83 * function must guarantee to have released all the queues that
 84 * might have been using this tag map.
 85 */
 86void blk_free_tags(struct blk_queue_tag *bqt)
 87{
 88	if (unlikely(!__blk_free_tags(bqt)))
 89		BUG();
 90}
 91EXPORT_SYMBOL(blk_free_tags);
 92
 93/**
 94 * blk_queue_free_tags - release tag maintenance info
 95 * @q:  the request queue for the device
 96 *
 97 *  Notes:
 98 *	This is used to disable tagged queuing to a device, yet leave
 99 *	the queue functioning.
100 **/
101void blk_queue_free_tags(struct request_queue *q)
102{
103	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
104}
105EXPORT_SYMBOL(blk_queue_free_tags);
106
107static int
108init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
109{
110	struct request **tag_index;
111	unsigned long *tag_map;
112	int nr_ulongs;
113
114	if (q && depth > q->nr_requests * 2) {
115		depth = q->nr_requests * 2;
116		printk(KERN_ERR "%s: adjusted depth to %d\n",
117		       __func__, depth);
118	}
119
120	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
121	if (!tag_index)
122		goto fail;
123
124	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
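	/* e.g. (illustrative) depth = 70 with 64-bit longs: ALIGN(70, 64) = 128, so 2 ulongs */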
125	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
126	if (!tag_map)
127		goto fail;
128
129	tags->real_max_depth = depth;
130	tags->max_depth = depth;
131	tags->tag_index = tag_index;
132	tags->tag_map = tag_map;
133
134	return 0;
135fail:
136	kfree(tag_index);
137	return -ENOMEM;
138}
139
140static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
141						   int depth)
142{
143	struct blk_queue_tag *tags;
144
145	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
146	if (!tags)
147		goto fail;
148
149	if (init_tag_map(q, tags, depth))
150		goto fail;
151
152	atomic_set(&tags->refcnt, 1);
153	return tags;
154fail:
155	kfree(tags);
156	return NULL;
157}
158
159/**
160 * blk_init_tags - initialize the tag info for an external tag map
161 * @depth:	the maximum queue depth supported
162 **/
163struct blk_queue_tag *blk_init_tags(int depth)
164{
165	return __blk_queue_init_tags(NULL, depth);
166}
167EXPORT_SYMBOL(blk_init_tags);
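/*
 * Illustrative sketch (not part of the original file): lifetime of an
 * externally managed tag map.  blk_init_tags() creates the map,
 * blk_queue_init_tags() attaches it to one or more queues, and
 * blk_free_tags() drops the creator's reference once every queue that
 * used the map has been released.  struct example_host is hypothetical.
 */
struct example_host {
	struct blk_queue_tag	*shared_tags;
};

static int example_host_setup(struct example_host *host, int depth)
{
	host->shared_tags = blk_init_tags(depth);
	return host->shared_tags ? 0 : -ENOMEM;
}

static void example_host_teardown(struct example_host *host)
{
	/* all queues that used the map must already be gone */
	blk_free_tags(host->shared_tags);
}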
168
169/**
170 * blk_queue_init_tags - initialize the queue tag info
171 * @q:  the request queue for the device
172 * @depth:  the maximum queue depth supported
173 * @tags: the tag to use
174 *
175 * Queue lock must be held here if the function is called to resize an
176 * existing map.
177 **/
178int blk_queue_init_tags(struct request_queue *q, int depth,
179			struct blk_queue_tag *tags)
180{
181	int rc;
182
183	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
184
185	if (!tags && !q->queue_tags) {
186		tags = __blk_queue_init_tags(q, depth);
187
188		if (!tags)
189			return -ENOMEM;
190
191	} else if (q->queue_tags) {
192		rc = blk_queue_resize_tags(q, depth);
193		if (rc)
194			return rc;
195		queue_flag_set(QUEUE_FLAG_QUEUED, q);
196		return 0;
197	} else
198		atomic_inc(&tags->refcnt);
199
200	/*
201	 * assign it, all done
202	 */
203	q->queue_tags = tags;
204	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
205	INIT_LIST_HEAD(&q->tag_busy_list);
 206	return 0;
 207}
208EXPORT_SYMBOL(blk_queue_init_tags);
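/*
 * Illustrative sketch (not part of the original file): enabling tagged
 * queuing on a queue.  Passing tags == NULL gives the queue a private
 * map; passing a map from blk_init_tags() shares it (and bumps its
 * refcount).  example_enable_tcq() is hypothetical; the queue lock must
 * be held if the call ends up resizing an existing map.
 */
static int example_enable_tcq(struct request_queue *q, int depth,
			      struct blk_queue_tag *shared_tags)
{
	int err;

	err = blk_queue_init_tags(q, depth, shared_tags);
	if (err)
		printk(KERN_ERR "example: could not enable tagging: %d\n", err);
	return err;
}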
209
210/**
211 * blk_queue_resize_tags - change the queueing depth
212 * @q:  the request queue for the device
213 * @new_depth: the new max command queueing depth
214 *
215 *  Notes:
216 *    Must be called with the queue lock held.
217 **/
218int blk_queue_resize_tags(struct request_queue *q, int new_depth)
219{
220	struct blk_queue_tag *bqt = q->queue_tags;
221	struct request **tag_index;
222	unsigned long *tag_map;
223	int max_depth, nr_ulongs;
224
225	if (!bqt)
226		return -ENXIO;
227
228	/*
 229	 * If we already have a large enough real_max_depth, just
 230	 * adjust max_depth.  *NOTE* as requests with tag values
 231	 * between new_depth and real_max_depth can be in flight, the
 232	 * tag map cannot be shrunk blindly here.
233	 */
234	if (new_depth <= bqt->real_max_depth) {
235		bqt->max_depth = new_depth;
236		return 0;
237	}
238
239	/*
240	 * Currently cannot replace a shared tag map with a new
241	 * one, so error out if this is the case
242	 */
243	if (atomic_read(&bqt->refcnt) != 1)
244		return -EBUSY;
245
246	/*
247	 * save the old state info, so we can copy it back
248	 */
249	tag_index = bqt->tag_index;
250	tag_map = bqt->tag_map;
251	max_depth = bqt->real_max_depth;
252
253	if (init_tag_map(q, bqt, new_depth))
254		return -ENOMEM;
255
256	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
257	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
258	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
259
260	kfree(tag_index);
261	kfree(tag_map);
262	return 0;
263}
264EXPORT_SYMBOL(blk_queue_resize_tags);
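/*
 * Illustrative sketch (not part of the original file): reacting to a
 * device that renegotiates its queue depth at run time.  Growing the
 * depth reallocates the map; shrinking only lowers max_depth, as the
 * comment above explains.  Hypothetical name; the caller holds the
 * queue lock.
 */
static int example_change_queue_depth(struct request_queue *q, int new_depth)
{
	return blk_queue_resize_tags(q, new_depth);
}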
265
266/**
267 * blk_queue_end_tag - end tag operations for a request
268 * @q:  the request queue for the device
269 * @rq: the request that has completed
270 *
271 *  Description:
272 *    Typically called when end_that_request_first() returns %0, meaning
273 *    all transfers have been done for a request. It's important to call
274 *    this function before end_that_request_last(), as that will put the
275 *    request back on the free list thus corrupting the internal tag list.
276 *
277 *  Notes:
278 *   queue lock must be held.
279 **/
280void blk_queue_end_tag(struct request_queue *q, struct request *rq)
281{
282	struct blk_queue_tag *bqt = q->queue_tags;
 283	unsigned tag = rq->tag; /* negative tags invalid */
 284
 285	BUG_ON(tag >= bqt->real_max_depth);
 286
287	list_del_init(&rq->queuelist);
288	rq->cmd_flags &= ~REQ_QUEUED;
289	rq->tag = -1;
290
291	if (unlikely(bqt->tag_index[tag] == NULL))
292		printk(KERN_ERR "%s: tag %d is missing\n",
293		       __func__, tag);
294
295	bqt->tag_index[tag] = NULL;
296
297	if (unlikely(!test_bit(tag, bqt->tag_map))) {
298		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
299		       __func__, tag);
300		return;
301	}
302	/*
303	 * The tag_map bit acts as a lock for tag_index[bit], so we need
304	 * unlock memory barrier semantics.
305	 */
306	clear_bit_unlock(tag, bqt->tag_map);
307}
308EXPORT_SYMBOL(blk_queue_end_tag);
309
310/**
311 * blk_queue_start_tag - find a free tag and assign it
312 * @q:  the request queue for the device
313 * @rq:  the block request that needs tagging
314 *
315 *  Description:
316 *    This can either be used as a stand-alone helper, or possibly be
317 *    assigned as the queue &prep_rq_fn (in which case &struct request
318 *    automagically gets a tag assigned). Note that this function
 319 *    assumes that any type of request can be queued! If this is not
320 *    true for your device, you must check the request type before
321 *    calling this function.  The request will also be removed from
 322 *    the request queue, so it's the driver's responsibility to re-add
323 *    it if it should need to be restarted for some reason.
324 *
325 *  Notes:
326 *   queue lock must be held.
327 **/
328int blk_queue_start_tag(struct request_queue *q, struct request *rq)
329{
330	struct blk_queue_tag *bqt = q->queue_tags;
331	unsigned max_depth;
332	int tag;
333
334	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
335		printk(KERN_ERR
336		       "%s: request %p for device [%s] already tagged %d",
337		       __func__, rq,
338		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
339		BUG();
340	}
341
342	/*
343	 * Protect against shared tag maps, as we may not have exclusive
344	 * access to the tag map.
345	 *
346	 * We reserve a few tags just for sync IO, since we don't want
347	 * to starve sync IO on behalf of flooding async IO.
348	 */
349	max_depth = bqt->max_depth;
350	if (!rq_is_sync(rq) && max_depth > 1) {
351		switch (max_depth) {
352		case 2:
353			max_depth = 1;
354			break;
355		case 3:
356			max_depth = 2;
357			break;
358		default:
359			max_depth -= 2;
360		}
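		/*
		 * Illustrative: with max_depth 8 async I/O may use at most 6
		 * tags; with max_depth 2 it may use only 1, so sync I/O always
		 * has at least one tag in reserve.
		 */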
361		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
362			return 1;
363	}
364
365	do {
366		tag = find_first_zero_bit(bqt->tag_map, max_depth);
367		if (tag >= max_depth)
368			return 1;
369
370	} while (test_and_set_bit_lock(tag, bqt->tag_map));
371	/*
372	 * We need lock ordering semantics given by test_and_set_bit_lock.
373	 * See blk_queue_end_tag for details.
374	 */
375
376	rq->cmd_flags |= REQ_QUEUED;
377	rq->tag = tag;
378	bqt->tag_index[tag] = rq;
379	blk_start_request(rq);
380	list_add(&rq->queuelist, &q->tag_busy_list);
381	return 0;
382}
383EXPORT_SYMBOL(blk_queue_start_tag);
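/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * of blk_queue_start_tag() in a driver's dispatch path with
 * blk_queue_end_tag() on completion.  All example_* names are
 * hypothetical, and both functions are called with the queue lock held,
 * as the notes above require.
 */
static void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;	/* no free tag; wait for a completion */
		/* ... issue rq to the hardware using rq->tag ... */
	}
}

static void example_complete_rq(struct request_queue *q, struct request *rq)
{
	/* release the tag before the final completion of rq */
	blk_queue_end_tag(q, rq);
	/* ... then end the request, e.g. via __blk_end_request_all() ... */
}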
384
385/**
386 * blk_queue_invalidate_tags - invalidate all pending tags
387 * @q:  the request queue for the device
388 *
389 *  Description:
390 *   Hardware conditions may dictate a need to stop all pending requests.
391 *   In this case, we will safely clear the block side of the tag queue and
 392 *   re-add all requests to the request queue in the right order.
393 *
394 *  Notes:
395 *   queue lock must be held.
396 **/
397void blk_queue_invalidate_tags(struct request_queue *q)
398{
399	struct list_head *tmp, *n;
400
401	list_for_each_safe(tmp, n, &q->tag_busy_list)
402		blk_requeue_request(q, list_entry_rq(tmp));
403}
404EXPORT_SYMBOL(blk_queue_invalidate_tags);
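/*
 * Illustrative sketch (not part of the original file): a reset handler
 * that aborts everything the hardware had in flight and lets the block
 * layer reissue it.  example_reset() is hypothetical; the queue lock is
 * taken because blk_queue_invalidate_tags() requires it.
 */
static void example_reset(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_queue_invalidate_tags(q);	/* requeues all tagged requests */
	spin_unlock_irqrestore(q->queue_lock, flags);
}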
v3.1
  1/*
  2 * Functions related to tagged command queuing
  3 */
  4#include <linux/kernel.h>
  5#include <linux/module.h>
  6#include <linux/bio.h>
  7#include <linux/blkdev.h>
  8#include <linux/slab.h>
  9
 10#include "blk.h"
 11
 12/**
 13 * blk_queue_find_tag - find a request by its tag and queue
 14 * @q:	 The request queue for the device
 15 * @tag: The tag of the request
 16 *
 17 * Notes:
 18 *    Should be used when a device returns a tag and you want to match
 19 *    it with a request.
 20 *
 21 *    no locks need be held.
 22 **/
 23struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 24{
 25	return blk_map_queue_find_tag(q->queue_tags, tag);
 26}
 27EXPORT_SYMBOL(blk_queue_find_tag);
 28
 29/**
 30 * __blk_free_tags - release a given set of tag maintenance info
 31 * @bqt:	the tag map to free
 32 *
 33 * Tries to free the specified @bqt.  Returns true if it was
 34 * actually freed and false if there are still references using it
 35 */
 36static int __blk_free_tags(struct blk_queue_tag *bqt)
 37{
 38	int retval;
 39
 40	retval = atomic_dec_and_test(&bqt->refcnt);
 41	if (retval) {
 42		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
 43							bqt->max_depth);
 44
 45		kfree(bqt->tag_index);
 46		bqt->tag_index = NULL;
 47
 48		kfree(bqt->tag_map);
 49		bqt->tag_map = NULL;
 50
 51		kfree(bqt);
 52	}
 53
 54	return retval;
 55}
 56
 57/**
 58 * __blk_queue_free_tags - release tag maintenance info
 59 * @q:  the request queue for the device
 60 *
 61 *  Notes:
 62 *    blk_cleanup_queue() will take care of calling this function, if tagging
 63 *    has been used. So there's no need to call this directly.
 64 **/
 65void __blk_queue_free_tags(struct request_queue *q)
 66{
 67	struct blk_queue_tag *bqt = q->queue_tags;
 68
 69	if (!bqt)
 70		return;
 71
 72	__blk_free_tags(bqt);
 73
 74	q->queue_tags = NULL;
 75	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 76}
 77
 78/**
 79 * blk_free_tags - release a given set of tag maintenance info
 80 * @bqt:	the tag map to free
 81 *
 82 * For externally managed @bqt frees the map.  Callers of this
 83 * function must guarantee to have released all the queues that
 84 * might have been using this tag map.
 85 */
 86void blk_free_tags(struct blk_queue_tag *bqt)
 87{
 88	if (unlikely(!__blk_free_tags(bqt)))
 89		BUG();
 90}
 91EXPORT_SYMBOL(blk_free_tags);
 92
 93/**
 94 * blk_queue_free_tags - release tag maintenance info
 95 * @q:  the request queue for the device
 96 *
 97 *  Notes:
 98 *	This is used to disable tagged queuing to a device, yet leave
 99 *	the queue functioning.
100 **/
101void blk_queue_free_tags(struct request_queue *q)
102{
103	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
104}
105EXPORT_SYMBOL(blk_queue_free_tags);
106
107static int
108init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
109{
110	struct request **tag_index;
111	unsigned long *tag_map;
112	int nr_ulongs;
113
114	if (q && depth > q->nr_requests * 2) {
115		depth = q->nr_requests * 2;
116		printk(KERN_ERR "%s: adjusted depth to %d\n",
117		       __func__, depth);
118	}
119
120	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
121	if (!tag_index)
122		goto fail;
123
124	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
125	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
126	if (!tag_map)
127		goto fail;
128
129	tags->real_max_depth = depth;
130	tags->max_depth = depth;
131	tags->tag_index = tag_index;
132	tags->tag_map = tag_map;
133
134	return 0;
135fail:
136	kfree(tag_index);
137	return -ENOMEM;
138}
139
140static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
141						   int depth)
142{
143	struct blk_queue_tag *tags;
144
145	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
146	if (!tags)
147		goto fail;
148
149	if (init_tag_map(q, tags, depth))
150		goto fail;
151
152	atomic_set(&tags->refcnt, 1);
153	return tags;
154fail:
155	kfree(tags);
156	return NULL;
157}
158
159/**
160 * blk_init_tags - initialize the tag info for an external tag map
161 * @depth:	the maximum queue depth supported
162 **/
163struct blk_queue_tag *blk_init_tags(int depth)
164{
165	return __blk_queue_init_tags(NULL, depth);
166}
167EXPORT_SYMBOL(blk_init_tags);
168
169/**
170 * blk_queue_init_tags - initialize the queue tag info
171 * @q:  the request queue for the device
172 * @depth:  the maximum queue depth supported
173 * @tags: the tag to use
174 *
175 * Queue lock must be held here if the function is called to resize an
176 * existing map.
177 **/
178int blk_queue_init_tags(struct request_queue *q, int depth,
179			struct blk_queue_tag *tags)
180{
181	int rc;
182
183	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
184
185	if (!tags && !q->queue_tags) {
186		tags = __blk_queue_init_tags(q, depth);
187
188		if (!tags)
 189			goto fail;
190	} else if (q->queue_tags) {
191		rc = blk_queue_resize_tags(q, depth);
192		if (rc)
193			return rc;
194		queue_flag_set(QUEUE_FLAG_QUEUED, q);
195		return 0;
196	} else
197		atomic_inc(&tags->refcnt);
198
199	/*
200	 * assign it, all done
201	 */
202	q->queue_tags = tags;
203	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
204	INIT_LIST_HEAD(&q->tag_busy_list);
205	return 0;
206fail:
207	kfree(tags);
208	return -ENOMEM;
209}
210EXPORT_SYMBOL(blk_queue_init_tags);
211
212/**
213 * blk_queue_resize_tags - change the queueing depth
214 * @q:  the request queue for the device
215 * @new_depth: the new max command queueing depth
216 *
217 *  Notes:
218 *    Must be called with the queue lock held.
219 **/
220int blk_queue_resize_tags(struct request_queue *q, int new_depth)
221{
222	struct blk_queue_tag *bqt = q->queue_tags;
223	struct request **tag_index;
224	unsigned long *tag_map;
225	int max_depth, nr_ulongs;
226
227	if (!bqt)
228		return -ENXIO;
229
230	/*
 231	 * If we already have a large enough real_max_depth, just
 232	 * adjust max_depth.  *NOTE* as requests with tag values
 233	 * between new_depth and real_max_depth can be in flight, the
 234	 * tag map cannot be shrunk blindly here.
235	 */
236	if (new_depth <= bqt->real_max_depth) {
237		bqt->max_depth = new_depth;
238		return 0;
239	}
240
241	/*
242	 * Currently cannot replace a shared tag map with a new
243	 * one, so error out if this is the case
244	 */
245	if (atomic_read(&bqt->refcnt) != 1)
246		return -EBUSY;
247
248	/*
249	 * save the old state info, so we can copy it back
250	 */
251	tag_index = bqt->tag_index;
252	tag_map = bqt->tag_map;
253	max_depth = bqt->real_max_depth;
254
255	if (init_tag_map(q, bqt, new_depth))
256		return -ENOMEM;
257
258	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
259	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
260	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
261
262	kfree(tag_index);
263	kfree(tag_map);
264	return 0;
265}
266EXPORT_SYMBOL(blk_queue_resize_tags);
267
268/**
269 * blk_queue_end_tag - end tag operations for a request
270 * @q:  the request queue for the device
271 * @rq: the request that has completed
272 *
273 *  Description:
274 *    Typically called when end_that_request_first() returns %0, meaning
275 *    all transfers have been done for a request. It's important to call
276 *    this function before end_that_request_last(), as that will put the
277 *    request back on the free list thus corrupting the internal tag list.
278 *
279 *  Notes:
280 *   queue lock must be held.
281 **/
282void blk_queue_end_tag(struct request_queue *q, struct request *rq)
283{
284	struct blk_queue_tag *bqt = q->queue_tags;
285	int tag = rq->tag;
286
287	BUG_ON(tag == -1);
288
289	if (unlikely(tag >= bqt->real_max_depth))
290		/*
291		 * This can happen after tag depth has been reduced.
292		 * FIXME: how about a warning or info message here?
293		 */
294		return;
295
296	list_del_init(&rq->queuelist);
297	rq->cmd_flags &= ~REQ_QUEUED;
298	rq->tag = -1;
299
300	if (unlikely(bqt->tag_index[tag] == NULL))
301		printk(KERN_ERR "%s: tag %d is missing\n",
302		       __func__, tag);
303
304	bqt->tag_index[tag] = NULL;
305
306	if (unlikely(!test_bit(tag, bqt->tag_map))) {
307		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
308		       __func__, tag);
309		return;
310	}
311	/*
312	 * The tag_map bit acts as a lock for tag_index[bit], so we need
313	 * unlock memory barrier semantics.
314	 */
315	clear_bit_unlock(tag, bqt->tag_map);
316}
317EXPORT_SYMBOL(blk_queue_end_tag);
318
319/**
320 * blk_queue_start_tag - find a free tag and assign it
321 * @q:  the request queue for the device
322 * @rq:  the block request that needs tagging
323 *
324 *  Description:
325 *    This can either be used as a stand-alone helper, or possibly be
326 *    assigned as the queue &prep_rq_fn (in which case &struct request
327 *    automagically gets a tag assigned). Note that this function
 328 *    assumes that any type of request can be queued! If this is not
329 *    true for your device, you must check the request type before
330 *    calling this function.  The request will also be removed from
 331 *    the request queue, so it's the driver's responsibility to re-add
332 *    it if it should need to be restarted for some reason.
333 *
334 *  Notes:
335 *   queue lock must be held.
336 **/
337int blk_queue_start_tag(struct request_queue *q, struct request *rq)
338{
339	struct blk_queue_tag *bqt = q->queue_tags;
340	unsigned max_depth;
341	int tag;
342
343	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
344		printk(KERN_ERR
345		       "%s: request %p for device [%s] already tagged %d",
346		       __func__, rq,
347		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
348		BUG();
349	}
350
351	/*
352	 * Protect against shared tag maps, as we may not have exclusive
353	 * access to the tag map.
354	 *
355	 * We reserve a few tags just for sync IO, since we don't want
356	 * to starve sync IO on behalf of flooding async IO.
357	 */
358	max_depth = bqt->max_depth;
359	if (!rq_is_sync(rq) && max_depth > 1) {
360		max_depth -= 2;
361		if (!max_depth)
 362			max_depth = 1;
 363		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
364			return 1;
365	}
366
367	do {
368		tag = find_first_zero_bit(bqt->tag_map, max_depth);
369		if (tag >= max_depth)
370			return 1;
371
372	} while (test_and_set_bit_lock(tag, bqt->tag_map));
373	/*
374	 * We need lock ordering semantics given by test_and_set_bit_lock.
375	 * See blk_queue_end_tag for details.
376	 */
377
378	rq->cmd_flags |= REQ_QUEUED;
379	rq->tag = tag;
380	bqt->tag_index[tag] = rq;
381	blk_start_request(rq);
382	list_add(&rq->queuelist, &q->tag_busy_list);
383	return 0;
384}
385EXPORT_SYMBOL(blk_queue_start_tag);
386
387/**
388 * blk_queue_invalidate_tags - invalidate all pending tags
389 * @q:  the request queue for the device
390 *
391 *  Description:
392 *   Hardware conditions may dictate a need to stop all pending requests.
393 *   In this case, we will safely clear the block side of the tag queue and
 394 *   re-add all requests to the request queue in the right order.
395 *
396 *  Notes:
397 *   queue lock must be held.
398 **/
399void blk_queue_invalidate_tags(struct request_queue *q)
400{
401	struct list_head *tmp, *n;
402
403	list_for_each_safe(tmp, n, &q->tag_busy_list)
404		blk_requeue_request(q, list_entry_rq(tmp));
405}
406EXPORT_SYMBOL(blk_queue_invalidate_tags);