/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:   The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
        return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
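
/*
 * Illustrative sketch (not part of the original source): a completion
 * handler can use blk_queue_find_tag() to map a tag reported by the
 * hardware back to the request it was assigned to.  The names
 * example_complete_tag() and example_finish() below are hypothetical.
 *
 *      static void example_complete_tag(struct request_queue *q, int hw_tag)
 *      {
 *              struct request *rq = blk_queue_find_tag(q, hw_tag);
 *
 *              if (!rq)
 *                      return;         // stale or out-of-range tag
 *              example_finish(rq);
 *      }
 */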

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:  the tag map to free
 *
 * Drops the reference count on @bqt and frees it when the last reference
 * is dropped.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
        if (atomic_dec_and_test(&bqt->refcnt)) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                                                        bqt->max_depth);

                kfree(bqt->tag_index);
                bqt->tag_index = NULL;

                kfree(bqt->tag_map);
                bqt->tag_map = NULL;

                kfree(bqt);
        }
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
        struct blk_queue_tag *bqt = q->queue_tags;

        if (!bqt)
                return;

        blk_free_tags(bqt);

        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 * Notes:
 *    This is used to disable tagged queuing to a device, yet leave the
 *    queue functioning.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
        struct request **tag_index;
        unsigned long *tag_map;
        int nr_ulongs;

        if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
                       __func__, depth);
        }

        tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;

        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;

        tags->real_max_depth = depth;
        tags->max_depth = depth;
        tags->tag_index = tag_index;
        tags->tag_map = tag_map;

        return 0;
fail:
        kfree(tag_index);
        return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
                                                   int depth, int alloc_policy)
{
        struct blk_queue_tag *tags;

        tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
        if (!tags)
                goto fail;

        if (init_tag_map(q, tags, depth))
                goto fail;

        atomic_set(&tags->refcnt, 1);
        tags->alloc_policy = alloc_policy;
        tags->next_tag = 0;
        return tags;
fail:
        kfree(tags);
        return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:  the maximum queue depth supported
 * @alloc_policy: tag allocation policy
 **/
struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
{
        return __blk_queue_init_tags(NULL, depth, alloc_policy);
}
EXPORT_SYMBOL(blk_init_tags);
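
/*
 * Illustrative sketch (not part of the original source): a host driver
 * that shares one tag space across several devices can allocate the map
 * once with blk_init_tags() and attach it to each queue later via
 * blk_queue_init_tags() (see the sketch after that function below).
 * BLK_TAG_ALLOC_FIFO is the allocation policy used in
 * blk_queue_start_tag() below; the shared_tags variable is hypothetical.
 *
 *      struct blk_queue_tag *shared_tags;
 *
 *      shared_tags = blk_init_tags(64, BLK_TAG_ALLOC_FIFO);
 *      if (!shared_tags)
 *              return -ENOMEM;
 */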

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 * @alloc_policy: tag allocation policy
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
                        struct blk_queue_tag *tags, int alloc_policy)
{
        int rc;

        BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

        if (!tags && !q->queue_tags) {
                tags = __blk_queue_init_tags(q, depth, alloc_policy);

                if (!tags)
                        return -ENOMEM;

        } else if (q->queue_tags) {
                rc = blk_queue_resize_tags(q, depth);
                if (rc)
                        return rc;
                queue_flag_set(QUEUE_FLAG_QUEUED, q);
                return 0;
        } else
                atomic_inc(&tags->refcnt);

        /*
         * assign it, all done
         */
        q->queue_tags = tags;
        queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
        INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
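
/*
 * Illustrative sketch (not part of the original source): enabling tagging
 * on a queue, either with a private per-queue map (tags == NULL) or with a
 * map created earlier by blk_init_tags().  The shared_tags variable and
 * the out_cleanup label are hypothetical; a zero return means tagging is
 * now enabled on the queue.
 *
 *      // private map, one per queue
 *      if (blk_queue_init_tags(q, 32, NULL, BLK_TAG_ALLOC_FIFO))
 *              goto out_cleanup;
 *
 *      // or: attach a map shared with other queues on the same host
 *      if (blk_queue_init_tags(q, 32, shared_tags, BLK_TAG_ALLOC_FIFO))
 *              goto out_cleanup;
 */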

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        struct request **tag_index;
        unsigned long *tag_map;
        int max_depth, nr_ulongs;

        if (!bqt)
                return -ENXIO;

        /*
         * If real_max_depth is already large enough, just adjust
         * max_depth.  *NOTE* as requests with tag value between
         * new_depth and real_max_depth can be in-flight, the tag
         * map can not be shrunk blindly here.
         */
        if (new_depth <= bqt->real_max_depth) {
                bqt->max_depth = new_depth;
                return 0;
        }

        /*
         * Currently cannot replace a shared tag map with a new
         * one, so error out if this is the case
         */
        if (atomic_read(&bqt->refcnt) != 1)
                return -EBUSY;

        /*
         * save the old state info, so we can copy it back
         */
        tag_index = bqt->tag_index;
        tag_map = bqt->tag_map;
        max_depth = bqt->real_max_depth;

        if (init_tag_map(q, bqt, new_depth))
                return -ENOMEM;

        memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
        nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
        memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

        kfree(tag_index);
        kfree(tag_map);
        return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
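
/*
 * Illustrative sketch (not part of the original source): a driver that
 * learns a new queue depth from the device at runtime can adjust the tag
 * map while holding the queue lock, as required by the note above.  The
 * new_depth variable is hypothetical.
 *
 *      spin_lock_irq(q->queue_lock);
 *      if (blk_queue_resize_tags(q, new_depth))
 *              printk(KERN_WARNING "could not resize tag map\n");
 *      spin_unlock_irq(q->queue_lock);
 */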

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned tag = rq->tag; /* negative tags invalid */

        BUG_ON(tag >= bqt->real_max_depth);

        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
        rq->tag = -1;

        if (unlikely(bqt->tag_index[tag] == NULL))
                printk(KERN_ERR "%s: tag %d is missing\n",
                       __func__, tag);

        bqt->tag_index[tag] = NULL;

        if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
                       __func__, tag);
                return;
        }
        /*
         * The tag_map bit acts as a lock for tag_index[bit], so we need
         * unlock memory barrier semantics.
         */
        clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 * Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 * Notes:
 *    queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned max_depth;
        int tag;

        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d",
                       __func__, rq,
                       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }

        /*
         * Protect against shared tag maps, as we may not have exclusive
         * access to the tag map.
         *
         * We reserve a few tags just for sync IO, since we don't want
         * to starve sync IO on behalf of flooding async IO.
         */
        max_depth = bqt->max_depth;
        if (!rq_is_sync(rq) && max_depth > 1) {
                switch (max_depth) {
                case 2:
                        max_depth = 1;
                        break;
                case 3:
                        max_depth = 2;
                        break;
                default:
                        max_depth -= 2;
                }
                if (q->in_flight[BLK_RW_ASYNC] > max_depth)
                        return 1;
        }

        do {
                if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
                        tag = find_first_zero_bit(bqt->tag_map, max_depth);
                        if (tag >= max_depth)
                                return 1;
                } else {
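                        /*
                         * Round-robin allocation: start the search at
                         * bqt->next_tag (the tag after the one handed out
                         * last time) and wrap around to the start of the
                         * map if the search window extends past the end.
                         */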
                        int start = bqt->next_tag;
                        int size = min_t(int, bqt->max_depth, max_depth + start);
                        tag = find_next_zero_bit(bqt->tag_map, size, start);
                        if (tag >= size && start + size > bqt->max_depth) {
                                size = start + size - bqt->max_depth;
                                tag = find_first_zero_bit(bqt->tag_map, size);
                        }
                        if (tag >= size)
                                return 1;
                }

        } while (test_and_set_bit_lock(tag, bqt->tag_map));
        /*
         * We need lock ordering semantics given by test_and_set_bit_lock.
         * See blk_queue_end_tag for details.
         */

        bqt->next_tag = (tag + 1) % bqt->max_depth;
        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blk_start_request(rq);
        list_add(&rq->queuelist, &q->tag_busy_list);
        return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
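
/*
 * Illustrative sketch (not part of the original source): how a driver's
 * request_fn might use blk_queue_start_tag().  On success the request has
 * already been started and removed from the queue, and rq->tag identifies
 * the command to the hardware; the matching blk_queue_end_tag() is called
 * at completion time, before the request is ended.  example_request_fn()
 * and example_issue_to_hw() are hypothetical; both paths run with the
 * queue lock held.
 *
 *      static void example_request_fn(struct request_queue *q)
 *      {
 *              struct request *rq;
 *
 *              while ((rq = blk_peek_request(q)) != NULL) {
 *                      if (blk_queue_start_tag(q, rq))
 *                              break;          // no free tag, try again later
 *                      example_issue_to_hw(rq);
 *              }
 *      }
 *
 *      // completion path (queue lock held):
 *      //      blk_queue_end_tag(q, rq);
 *      //      __blk_end_request_all(rq, 0);
 */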

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 * Description:
 *    Hardware conditions may dictate a need to stop all pending requests.
 *    In this case, we will safely clear the block side of the tag queue and
 *    re-add all requests to the request queue in the right order.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
        struct list_head *tmp, *n;

        list_for_each_safe(tmp, n, &q->tag_busy_list)
                blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
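
/*
 * Illustrative sketch (not part of the original source): an error handler
 * that resets the hardware can push every tagged-but-unfinished request
 * back onto the request queue before restarting IO.  example_reset_hw(),
 * its dev argument and the flags variable are hypothetical; the queue
 * lock must be held as noted above.
 *
 *      spin_lock_irqsave(q->queue_lock, flags);
 *      blk_queue_invalidate_tags(q);
 *      example_reset_hw(dev);
 *      spin_unlock_irqrestore(q->queue_lock, flags);
 */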