v4.17
  1/*
  2 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
  3 * fairer distribution of tags between multiple submitters when a shared tag map
  4 * is used.
  5 *
  6 * Copyright (C) 2013-2014 Jens Axboe
  7 */
  8#include <linux/kernel.h>
  9#include <linux/module.h>
 10
 11#include <linux/blk-mq.h>
 12#include "blk.h"
 13#include "blk-mq.h"
 14#include "blk-mq-tag.h"
 15
 16bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
 17{
 18	if (!tags)
 19		return true;
 20
 21	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
 22}
 23
 24/*
 25 * If a previously inactive queue goes active, bump the active user count.
 26 */
 27bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 28{
 29	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
 30	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 31		atomic_inc(&hctx->tags->active_queues);
 32
 33	return true;
 34}
 35
 36/*
 37 * Wake up all waiters potentially sleeping on tags
 38 */
 39void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 40{
 41	sbitmap_queue_wake_all(&tags->bitmap_tags);
 42	if (include_reserve)
 43		sbitmap_queue_wake_all(&tags->breserved_tags);
 44}
 45
 46/*
 47 * If a previously busy queue goes inactive, potential waiters could now
 48 * be allowed to queue. Wake them up and check.
 49 */
 50void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
 51{
 52	struct blk_mq_tags *tags = hctx->tags;
 53
 54	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 55		return;
 56
 57	atomic_dec(&tags->active_queues);
 58
 59	blk_mq_tag_wakeup_all(tags, false);
 60}
 61
 62/*
 63 * For shared tag users, we track the number of currently active users
 64 * and attempt to provide a fair share of the tag depth for each of them.
 65 */
 66static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 67				  struct sbitmap_queue *bt)
 68{
 69	unsigned int depth, users;
 70
 71	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
 72		return true;
 73	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 74		return true;
 75
 76	/*
 77	 * Don't try dividing an ant
 78	 */
 79	if (bt->sb.depth == 1)
 80		return true;
 81
 82	users = atomic_read(&hctx->tags->active_queues);
 83	if (!users)
 84		return true;
 85
 86	/*
 87	 * Allow at least some tags
 88	 */
 89	depth = max((bt->sb.depth + users - 1) / users, 4U);
 90	return atomic_read(&hctx->nr_active) < depth;
 91}
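
To make the fair-share rule in hctx_may_queue() concrete: each active queue is allowed at most the ceiling of the shared depth divided by the number of active users, but never fewer than 4 tags. A minimal user-space sketch of the same arithmetic (the function name and the printf harness are illustrative, not kernel API):

#include <stdio.h>

/* Illustrative restatement of the depth calculation above. */
static unsigned int fair_share_depth(unsigned int depth, unsigned int users)
{
	unsigned int share = (depth + users - 1) / users;	/* ceiling division */

	return share > 4U ? share : 4U;	/* "allow at least some tags" */
}

int main(void)
{
	/* A shared map of 64 tags with 3 active queues: each hctx may keep
	 * at most max(ceil(64 / 3), 4) = 22 requests in flight.
	 */
	printf("%u\n", fair_share_depth(64, 3));
	return 0;
}
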
 92
 93static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
 94			    struct sbitmap_queue *bt)
 95{
 96	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
 97	    !hctx_may_queue(data->hctx, bt))
 98		return -1;
 99	if (data->shallow_depth)
100		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
101	else
102		return __sbitmap_queue_get(bt);
103}
104
105unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
106{
107	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
108	struct sbitmap_queue *bt;
109	struct sbq_wait_state *ws;
110	DEFINE_WAIT(wait);
111	unsigned int tag_offset;
112	bool drop_ctx;
113	int tag;
114
115	if (data->flags & BLK_MQ_REQ_RESERVED) {
116		if (unlikely(!tags->nr_reserved_tags)) {
117			WARN_ON_ONCE(1);
118			return BLK_MQ_TAG_FAIL;
119		}
120		bt = &tags->breserved_tags;
121		tag_offset = 0;
122	} else {
123		bt = &tags->bitmap_tags;
124		tag_offset = tags->nr_reserved_tags;
125	}
126
127	tag = __blk_mq_get_tag(data, bt);
128	if (tag != -1)
129		goto found_tag;
130
131	if (data->flags & BLK_MQ_REQ_NOWAIT)
132		return BLK_MQ_TAG_FAIL;
133
134	ws = bt_wait_ptr(bt, data->hctx);
135	drop_ctx = data->ctx == NULL;
136	do {
137		/*
138		 * We're out of tags on this hardware queue, kick any
139		 * pending IO submits before going to sleep waiting for
140		 * some to complete.
141		 */
142		blk_mq_run_hw_queue(data->hctx, false);
143
144		/*
145		 * Retry tag allocation after running the hardware queue,
146		 * as running the queue may also have found completions.
147		 */
148		tag = __blk_mq_get_tag(data, bt);
149		if (tag != -1)
150			break;
151
152		prepare_to_wait_exclusive(&ws->wait, &wait,
153						TASK_UNINTERRUPTIBLE);
154
155		tag = __blk_mq_get_tag(data, bt);
156		if (tag != -1)
157			break;
158
159		if (data->ctx)
160			blk_mq_put_ctx(data->ctx);
161
162		io_schedule();
163
164		data->ctx = blk_mq_get_ctx(data->q);
165		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
166		tags = blk_mq_tags_from_data(data);
167		if (data->flags & BLK_MQ_REQ_RESERVED)
168			bt = &tags->breserved_tags;
169		else
170			bt = &tags->bitmap_tags;
171
172		finish_wait(&ws->wait, &wait);
173		ws = bt_wait_ptr(bt, data->hctx);
174	} while (1);
175
176	if (drop_ctx && data->ctx)
177		blk_mq_put_ctx(data->ctx);
178
179	finish_wait(&ws->wait, &wait);
180
181found_tag:
182	return tag + tag_offset;
183}
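
Drivers normally reach blk_mq_get_tag() through request allocation rather than calling it directly. A hedged sketch of a caller that refuses to sleep for a tag (the helper name and error handling are illustrative; BLK_MQ_REQ_NOWAIT takes the early BLK_MQ_TAG_FAIL return above instead of the io_schedule() loop, and BLK_MQ_REQ_RESERVED would select breserved_tags):

#include <linux/blk-mq.h>
#include <linux/err.h>

/* Illustrative only: allocate a request, failing fast if no tag is free. */
static struct request *example_alloc_rq_nowait(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(rq))
		return NULL;	/* no tag available right now, caller may retry */

	return rq;
}
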
184
185void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
186		    struct blk_mq_ctx *ctx, unsigned int tag)
187{
188	if (!blk_mq_tag_is_reserved(tags, tag)) {
189		const int real_tag = tag - tags->nr_reserved_tags;
190
191		BUG_ON(real_tag >= tags->nr_tags);
192		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
193	} else {
194		BUG_ON(tag >= tags->nr_reserved_tags);
195		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
196	}
197}
198
199struct bt_iter_data {
200	struct blk_mq_hw_ctx *hctx;
201	busy_iter_fn *fn;
202	void *data;
203	bool reserved;
204};
205
206static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
207{
208	struct bt_iter_data *iter_data = data;
209	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
210	struct blk_mq_tags *tags = hctx->tags;
211	bool reserved = iter_data->reserved;
212	struct request *rq;
213
214	if (!reserved)
215		bitnr += tags->nr_reserved_tags;
216	rq = tags->rqs[bitnr];
217
218	/*
219	 * We can hit rq == NULL here, because the tagging functions
220	 * test and set the bit before assigning ->rqs[].
221	 */
222	if (rq && rq->q == hctx->queue)
223		iter_data->fn(hctx, rq, iter_data->data, reserved);
224	return true;
225}
226
227static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
228			busy_iter_fn *fn, void *data, bool reserved)
229{
230	struct bt_iter_data iter_data = {
231		.hctx = hctx,
232		.fn = fn,
233		.data = data,
234		.reserved = reserved,
235	};
236
237	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
238}
239
240struct bt_tags_iter_data {
241	struct blk_mq_tags *tags;
242	busy_tag_iter_fn *fn;
243	void *data;
244	bool reserved;
245};
246
247static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
248{
249	struct bt_tags_iter_data *iter_data = data;
250	struct blk_mq_tags *tags = iter_data->tags;
251	bool reserved = iter_data->reserved;
252	struct request *rq;
253
254	if (!reserved)
255		bitnr += tags->nr_reserved_tags;
256
257	/*
258	 * We can hit rq == NULL here, because the tagging functions
259	 * test and set the bit before assigning ->rqs[].
260	 */
261	rq = tags->rqs[bitnr];
262	if (rq)
263		iter_data->fn(rq, iter_data->data, reserved);
264
265	return true;
266}
267
268static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
269			     busy_tag_iter_fn *fn, void *data, bool reserved)
270{
271	struct bt_tags_iter_data iter_data = {
272		.tags = tags,
273		.fn = fn,
274		.data = data,
275		.reserved = reserved,
276	};
277
278	if (tags->rqs)
279		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
280}
281
282static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
283		busy_tag_iter_fn *fn, void *priv)
284{
285	if (tags->nr_reserved_tags)
286		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
287	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
288}
289
290void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
291		busy_tag_iter_fn *fn, void *priv)
292{
293	int i;
294
295	for (i = 0; i < tagset->nr_hw_queues; i++) {
296		if (tagset->tags && tagset->tags[i])
297			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
298	}
299}
300EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
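
As a usage sketch, a busy_tag_iter_fn callback receives each request that currently holds a tag, once per request, across all hardware queues of the tag set. The counter below is illustrative, not kernel API:

/* Illustrative callback matching busy_tag_iter_fn. */
static void example_count_rq(struct request *rq, void *data, bool reserved)
{
	unsigned int *count = data;

	(*count)++;
}

static unsigned int example_count_busy(struct blk_mq_tag_set *set)
{
	unsigned int count = 0;

	blk_mq_tagset_busy_iter(set, example_count_rq, &count);
	return count;
}
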
301
302int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
303			 int (fn)(void *, struct request *))
304{
305	int i, j, ret = 0;
306
307	if (WARN_ON_ONCE(!fn))
308		goto out;
309
310	for (i = 0; i < set->nr_hw_queues; i++) {
311		struct blk_mq_tags *tags = set->tags[i];
312
313		if (!tags)
314			continue;
315
316		for (j = 0; j < tags->nr_tags; j++) {
317			if (!tags->static_rqs[j])
318				continue;
319
320			ret = fn(data, tags->static_rqs[j]);
321			if (ret)
322				goto out;
323		}
324	}
325
326out:
327	return ret;
328}
329EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);
330
331void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
332		void *priv)
333{
334	struct blk_mq_hw_ctx *hctx;
335	int i;
336
337
338	queue_for_each_hw_ctx(q, hctx, i) {
339		struct blk_mq_tags *tags = hctx->tags;
340
341		/*
342		 * If no software queues are currently mapped to this
343		 * hardware queue, there's nothing to check
344		 */
345		if (!blk_mq_hw_queue_mapped(hctx))
346			continue;
347
348		if (tags->nr_reserved_tags)
349			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
350		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
351	}
352
353}
354
355static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
356		    bool round_robin, int node)
357{
358	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
359				       node);
360}
361
362static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
363						   int node, int alloc_policy)
364{
365	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
366	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;
367
368	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
369		goto free_tags;
370	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
371		     node))
372		goto free_bitmap_tags;
373
374	return tags;
375free_bitmap_tags:
376	sbitmap_queue_free(&tags->bitmap_tags);
377free_tags:
378	kfree(tags);
379	return NULL;
380}
381
382struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
383				     unsigned int reserved_tags,
384				     int node, int alloc_policy)
385{
386	struct blk_mq_tags *tags;
387
388	if (total_tags > BLK_MQ_TAG_MAX) {
389		pr_err("blk-mq: tag depth too large\n");
390		return NULL;
391	}
392
393	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
394	if (!tags)
395		return NULL;
396
397	tags->nr_tags = total_tags;
398	tags->nr_reserved_tags = reserved_tags;
399
400	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
401}
402
403void blk_mq_free_tags(struct blk_mq_tags *tags)
404{
405	sbitmap_queue_free(&tags->bitmap_tags);
406	sbitmap_queue_free(&tags->breserved_tags);
407	kfree(tags);
408}
409
410int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
411			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
412			    bool can_grow)
413{
414	struct blk_mq_tags *tags = *tagsptr;
415
416	if (tdepth <= tags->nr_reserved_tags)
417		return -EINVAL;
418
419	tdepth -= tags->nr_reserved_tags;
420
421	/*
422	 * If we are allowed to grow beyond the original size, allocate
423	 * a new set of tags before freeing the old one.
424	 */
425	if (tdepth > tags->nr_tags) {
426		struct blk_mq_tag_set *set = hctx->queue->tag_set;
427		struct blk_mq_tags *new;
428		bool ret;
429
430		if (!can_grow)
431			return -EINVAL;
432
433		/*
434		 * We need some sort of upper limit, set it high enough that
435		 * no valid use cases should require more.
436		 */
437		if (tdepth > 16 * BLKDEV_MAX_RQ)
438			return -EINVAL;
439
440		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
441		if (!new)
442			return -ENOMEM;
443		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
444		if (ret) {
445			blk_mq_free_rq_map(new);
446			return -ENOMEM;
447		}
448
449		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
450		blk_mq_free_rq_map(*tagsptr);
451		*tagsptr = new;
452	} else {
453		/*
454		 * Don't need (or can't) update reserved tags here, they
455		 * remain static and should never need resizing.
456		 */
457		sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
458	}
459
460	return 0;
461}
462
463/**
464 * blk_mq_unique_tag() - return a tag that is unique queue-wide
465 * @rq: request for which to compute a unique tag
466 *
467 * The tag field in struct request is unique per hardware queue but not over
468 * all hardware queues. Hence this function that returns a tag with the
469 * hardware context index in the upper bits and the per hardware queue tag in
470 * the lower bits.
471 *
472 * Note: When called for a request that is queued on a non-multiqueue request
473 * queue, the hardware context index is set to zero.
474 */
475u32 blk_mq_unique_tag(struct request *rq)
476{
477	struct request_queue *q = rq->q;
478	struct blk_mq_hw_ctx *hctx;
479	int hwq = 0;
480
481	if (q->mq_ops) {
482		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
483		hwq = hctx->queue_num;
484	}
485
486	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
487		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
488}
489EXPORT_SYMBOL(blk_mq_unique_tag);
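
A hedged example of consuming the queue-wide tag: because of the BLK_MQ_UNIQUE_TAG_BITS split, the value can be decoded again with the blk_mq_unique_tag_to_hwq() and blk_mq_unique_tag_to_tag() helpers from <linux/blk-mq.h> (the debug print is illustrative):

static void example_show_unique_tag(struct request *rq)
{
	u32 unique = blk_mq_unique_tag(rq);

	pr_debug("hwq=%u tag=%u\n",
		 blk_mq_unique_tag_to_hwq(unique),
		 blk_mq_unique_tag_to_tag(unique));
}
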
v4.6
  1/*
  2 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
  3 * over multiple cachelines to avoid ping-pong between multiple submitters
  4 * or submitter and completer. Uses rolling wakeups to avoid falling off
  5 * the scaling cliff when we run out of tags and have to start putting
  6 * submitters to sleep.
  7 *
  8 * Uses active queue tracking to support fairer distribution of tags
  9 * between multiple submitters when a shared tag map is used.
 10 *
 11 * Copyright (C) 2013-2014 Jens Axboe
 12 */
 13#include <linux/kernel.h>
 14#include <linux/module.h>
 15#include <linux/random.h>
 16
 17#include <linux/blk-mq.h>
 18#include "blk.h"
 19#include "blk-mq.h"
 20#include "blk-mq-tag.h"
 21
 22static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
 23{
 24	int i;
 25
 26	for (i = 0; i < bt->map_nr; i++) {
 27		struct blk_align_bitmap *bm = &bt->map[i];
 28		int ret;
 29
 30		ret = find_first_zero_bit(&bm->word, bm->depth);
 31		if (ret < bm->depth)
 32			return true;
 33	}
 34
 35	return false;
 36}
 37
 38bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
 39{
 40	if (!tags)
 41		return true;
 42
 43	return bt_has_free_tags(&tags->bitmap_tags);
 44}
 45
 46static inline int bt_index_inc(int index)
 47{
 48	return (index + 1) & (BT_WAIT_QUEUES - 1);
 49}
 50
 51static inline void bt_index_atomic_inc(atomic_t *index)
 52{
 53	int old = atomic_read(index);
 54	int new = bt_index_inc(old);
 55	atomic_cmpxchg(index, old, new);
 56}
 57
 58/*
 59 * If a previously inactive queue goes active, bump the active user count.
 60 */
 61bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 62{
 63	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
 64	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
 65		atomic_inc(&hctx->tags->active_queues);
 66
 67	return true;
 68}
 69
 70/*
 71 * Wake up all waiters potentially sleeping on tags
 72 */
 73void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
 74{
 75	struct blk_mq_bitmap_tags *bt;
 76	int i, wake_index;
 77
 78	/*
 79	 * Make sure all changes prior to this are visible from other CPUs.
 80	 */
 81	smp_mb();
 82	bt = &tags->bitmap_tags;
 83	wake_index = atomic_read(&bt->wake_index);
 84	for (i = 0; i < BT_WAIT_QUEUES; i++) {
 85		struct bt_wait_state *bs = &bt->bs[wake_index];
 86
 87		if (waitqueue_active(&bs->wait))
 88			wake_up(&bs->wait);
 89
 90		wake_index = bt_index_inc(wake_index);
 91	}
 92
 93	if (include_reserve) {
 94		bt = &tags->breserved_tags;
 95		if (waitqueue_active(&bt->bs[0].wait))
 96			wake_up(&bt->bs[0].wait);
 97	}
 98}
 99
100/*
101 * If a previously busy queue goes inactive, potential waiters could now
102 * be allowed to queue. Wake them up and check.
103 */
104void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
105{
106	struct blk_mq_tags *tags = hctx->tags;
107
108	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
109		return;
110
111	atomic_dec(&tags->active_queues);
112
113	blk_mq_tag_wakeup_all(tags, false);
114}
115
116/*
117 * For shared tag users, we track the number of currently active users
118 * and attempt to provide a fair share of the tag depth for each of them.
119 */
120static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
121				  struct blk_mq_bitmap_tags *bt)
122{
123	unsigned int depth, users;
124
125	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
126		return true;
127	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
128		return true;
129
130	/*
131	 * Don't try dividing an ant
132	 */
133	if (bt->depth == 1)
134		return true;
135
136	users = atomic_read(&hctx->tags->active_queues);
137	if (!users)
138		return true;
139
140	/*
141	 * Allow at least some tags
142	 */
143	depth = max((bt->depth + users - 1) / users, 4U);
144	return atomic_read(&hctx->nr_active) < depth;
145}
146
147static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag,
148			 bool nowrap)
149{
150	int tag, org_last_tag = last_tag;
151
152	while (1) {
153		tag = find_next_zero_bit(&bm->word, bm->depth, last_tag);
154		if (unlikely(tag >= bm->depth)) {
155			/*
156			 * We started with an offset, and we didn't reset the
157			 * offset to 0 in a failure case, so start from 0 to
158			 * exhaust the map.
159			 */
160			if (org_last_tag && last_tag && !nowrap) {
161				last_tag = org_last_tag = 0;
162				continue;
163			}
164			return -1;
165		}
166
167		if (!test_and_set_bit(tag, &bm->word))
168			break;
169
170		last_tag = tag + 1;
171		if (last_tag >= bm->depth - 1)
172			last_tag = 0;
173	}
174
175	return tag;
176}
177
178#define BT_ALLOC_RR(tags) (tags->alloc_policy == BLK_TAG_ALLOC_RR)
179
180/*
181 * Straightforward bitmap tag implementation, where each bit is a tag
182 * (cleared == free, and set == busy). The small twist is using per-cpu
183 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
184 * contexts. This enables us to drastically limit the space searched,
185 * without dirtying an extra shared cacheline like we would if we stored
186 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
187 * of that, each word of tags is in a separate cacheline. This means that
188 * multiple users will tend to stick to different cachelines, at least
189 * until the map is exhausted.
190 */
191static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
192		    unsigned int *tag_cache, struct blk_mq_tags *tags)
193{
194	unsigned int last_tag, org_last_tag;
195	int index, i, tag;
196
197	if (!hctx_may_queue(hctx, bt))
198		return -1;
199
200	last_tag = org_last_tag = *tag_cache;
201	index = TAG_TO_INDEX(bt, last_tag);
202
203	for (i = 0; i < bt->map_nr; i++) {
204		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag),
205				    BT_ALLOC_RR(tags));
206		if (tag != -1) {
207			tag += (index << bt->bits_per_word);
208			goto done;
209		}
210
211		/*
212		 * Jump to next index, and reset the last tag to be the
213		 * first tag of that index
214		 */
215		index++;
216		last_tag = (index << bt->bits_per_word);
217
218		if (index >= bt->map_nr) {
219			index = 0;
220			last_tag = 0;
221		}
222	}
223
224	*tag_cache = 0;
225	return -1;
226
227	/*
228	 * Only update the cache from the allocation path, if we ended
229	 * up using the specific cached tag.
230	 */
231done:
232	if (tag == org_last_tag || unlikely(BT_ALLOC_RR(tags))) {
233		last_tag = tag + 1;
234		if (last_tag >= bt->depth - 1)
235			last_tag = 0;
236
237		*tag_cache = last_tag;
238	}
239
240	return tag;
241}
242
243static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
244					 struct blk_mq_hw_ctx *hctx)
245{
246	struct bt_wait_state *bs;
247	int wait_index;
248
249	if (!hctx)
250		return &bt->bs[0];
251
252	wait_index = atomic_read(&hctx->wait_index);
253	bs = &bt->bs[wait_index];
254	bt_index_atomic_inc(&hctx->wait_index);
255	return bs;
256}
257
258static int bt_get(struct blk_mq_alloc_data *data,
259		struct blk_mq_bitmap_tags *bt,
260		struct blk_mq_hw_ctx *hctx,
261		unsigned int *last_tag, struct blk_mq_tags *tags)
262{
263	struct bt_wait_state *bs;
264	DEFINE_WAIT(wait);
265	int tag;
266
267	tag = __bt_get(hctx, bt, last_tag, tags);
268	if (tag != -1)
269		return tag;
270
271	if (data->flags & BLK_MQ_REQ_NOWAIT)
272		return -1;
273
274	bs = bt_wait_ptr(bt, hctx);
275	do {
276		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
277
278		tag = __bt_get(hctx, bt, last_tag, tags);
279		if (tag != -1)
280			break;
281
282		/*
283		 * We're out of tags on this hardware queue, kick any
284		 * pending IO submits before going to sleep waiting for
285		 * some to complete. Note that hctx can be NULL here for
286		 * reserved tag allocation.
287		 */
288		if (hctx)
289			blk_mq_run_hw_queue(hctx, false);
290
291		/*
292		 * Retry tag allocation after running the hardware queue,
293		 * as running the queue may also have found completions.
294		 */
295		tag = __bt_get(hctx, bt, last_tag, tags);
296		if (tag != -1)
297			break;
298
299		blk_mq_put_ctx(data->ctx);
300
301		io_schedule();
302
303		data->ctx = blk_mq_get_ctx(data->q);
304		data->hctx = data->q->mq_ops->map_queue(data->q,
305				data->ctx->cpu);
306		if (data->flags & BLK_MQ_REQ_RESERVED) {
307			bt = &data->hctx->tags->breserved_tags;
308		} else {
309			last_tag = &data->ctx->last_tag;
310			hctx = data->hctx;
311			bt = &hctx->tags->bitmap_tags;
312		}
313		finish_wait(&bs->wait, &wait);
314		bs = bt_wait_ptr(bt, hctx);
315	} while (1);
316
317	finish_wait(&bs->wait, &wait);
318	return tag;
319}
320
321static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
322{
323	int tag;
324
325	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
326			&data->ctx->last_tag, data->hctx->tags);
327	if (tag >= 0)
328		return tag + data->hctx->tags->nr_reserved_tags;
329
330	return BLK_MQ_TAG_FAIL;
331}
332
333static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
334{
335	int tag, zero = 0;
336
337	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
338		WARN_ON_ONCE(1);
339		return BLK_MQ_TAG_FAIL;
340	}
341
342	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero,
343		data->hctx->tags);
344	if (tag < 0)
345		return BLK_MQ_TAG_FAIL;
346
347	return tag;
348}
349
350unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
351{
352	if (data->flags & BLK_MQ_REQ_RESERVED)
353		return __blk_mq_get_reserved_tag(data);
354	return __blk_mq_get_tag(data);
355}
356
357static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
358{
359	int i, wake_index;
360
361	wake_index = atomic_read(&bt->wake_index);
362	for (i = 0; i < BT_WAIT_QUEUES; i++) {
363		struct bt_wait_state *bs = &bt->bs[wake_index];
364
365		if (waitqueue_active(&bs->wait)) {
366			int o = atomic_read(&bt->wake_index);
367			if (wake_index != o)
368				atomic_cmpxchg(&bt->wake_index, o, wake_index);
369
370			return bs;
371		}
372
373		wake_index = bt_index_inc(wake_index);
374	}
375
376	return NULL;
377}
378
379static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
380{
381	const int index = TAG_TO_INDEX(bt, tag);
382	struct bt_wait_state *bs;
383	int wait_cnt;
384
385	clear_bit(TAG_TO_BIT(bt, tag), &bt->map[index].word);
386
387	/* Ensure that the wait list checks occur after clear_bit(). */
388	smp_mb();
389
390	bs = bt_wake_ptr(bt);
391	if (!bs)
392		return;
393
394	wait_cnt = atomic_dec_return(&bs->wait_cnt);
395	if (unlikely(wait_cnt < 0))
396		wait_cnt = atomic_inc_return(&bs->wait_cnt);
397	if (wait_cnt == 0) {
398		atomic_add(bt->wake_cnt, &bs->wait_cnt);
399		bt_index_atomic_inc(&bt->wake_index);
400		wake_up(&bs->wait);
401	}
402}
403
404void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
405		    unsigned int *last_tag)
406{
407	struct blk_mq_tags *tags = hctx->tags;
408
409	if (tag >= tags->nr_reserved_tags) {
410		const int real_tag = tag - tags->nr_reserved_tags;
411
412		BUG_ON(real_tag >= tags->nr_tags);
413		bt_clear_tag(&tags->bitmap_tags, real_tag);
414		if (likely(tags->alloc_policy == BLK_TAG_ALLOC_FIFO))
415			*last_tag = real_tag;
416	} else {
417		BUG_ON(tag >= tags->nr_reserved_tags);
418		bt_clear_tag(&tags->breserved_tags, tag);
419	}
420}
421
422static void bt_for_each(struct blk_mq_hw_ctx *hctx,
423		struct blk_mq_bitmap_tags *bt, unsigned int off,
424		busy_iter_fn *fn, void *data, bool reserved)
425{
426	struct request *rq;
427	int bit, i;
428
429	for (i = 0; i < bt->map_nr; i++) {
430		struct blk_align_bitmap *bm = &bt->map[i];
431
432		for (bit = find_first_bit(&bm->word, bm->depth);
433		     bit < bm->depth;
434		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
435			rq = hctx->tags->rqs[off + bit];
436			if (rq->q == hctx->queue)
437				fn(hctx, rq, data, reserved);
438		}
439
440		off += (1 << bt->bits_per_word);
441	}
442}
443
444static void bt_tags_for_each(struct blk_mq_tags *tags,
445		struct blk_mq_bitmap_tags *bt, unsigned int off,
446		busy_tag_iter_fn *fn, void *data, bool reserved)
447{
448	struct request *rq;
449	int bit, i;
450
451	if (!tags->rqs)
452		return;
453	for (i = 0; i < bt->map_nr; i++) {
454		struct blk_align_bitmap *bm = &bt->map[i];
455
456		for (bit = find_first_bit(&bm->word, bm->depth);
457		     bit < bm->depth;
458		     bit = find_next_bit(&bm->word, bm->depth, bit + 1)) {
459			rq = tags->rqs[off + bit];
460			fn(rq, data, reserved);
461		}
462
463		off += (1 << bt->bits_per_word);
464	}
465}
466
467void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
468		void *priv)
469{
470	if (tags->nr_reserved_tags)
471		bt_tags_for_each(tags, &tags->breserved_tags, 0, fn, priv, true);
472	bt_tags_for_each(tags, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
473			false);
474}
475EXPORT_SYMBOL(blk_mq_all_tag_busy_iter);
476
477void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
478		void *priv)
479{
480	struct blk_mq_hw_ctx *hctx;
481	int i;
482
483
484	queue_for_each_hw_ctx(q, hctx, i) {
485		struct blk_mq_tags *tags = hctx->tags;
486
487		/*
488		 * If no software queues are currently mapped to this
489		 * hardware queue, there's nothing to check
490		 */
491		if (!blk_mq_hw_queue_mapped(hctx))
492			continue;
493
494		if (tags->nr_reserved_tags)
495			bt_for_each(hctx, &tags->breserved_tags, 0, fn, priv, true);
496		bt_for_each(hctx, &tags->bitmap_tags, tags->nr_reserved_tags, fn, priv,
497		      false);
498	}
499
500}
501
502static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
503{
504	unsigned int i, used;
505
506	for (i = 0, used = 0; i < bt->map_nr; i++) {
507		struct blk_align_bitmap *bm = &bt->map[i];
508
509		used += bitmap_weight(&bm->word, bm->depth);
510	}
511
512	return bt->depth - used;
513}
514
515static void bt_update_count(struct blk_mq_bitmap_tags *bt,
516			    unsigned int depth)
517{
518	unsigned int tags_per_word = 1U << bt->bits_per_word;
519	unsigned int map_depth = depth;
520
521	if (depth) {
522		int i;
523
524		for (i = 0; i < bt->map_nr; i++) {
525			bt->map[i].depth = min(map_depth, tags_per_word);
526			map_depth -= bt->map[i].depth;
527		}
528	}
529
530	bt->wake_cnt = BT_WAIT_BATCH;
531	if (bt->wake_cnt > depth / BT_WAIT_QUEUES)
532		bt->wake_cnt = max(1U, depth / BT_WAIT_QUEUES);
533
534	bt->depth = depth;
535}
536
537static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
538			int node, bool reserved)
539{
540	int i;
541
542	bt->bits_per_word = ilog2(BITS_PER_LONG);
543
544	/*
545	 * Depth can be zero for reserved tags, that's not a failure
546	 * condition.
547	 */
548	if (depth) {
549		unsigned int nr, tags_per_word;
550
551		tags_per_word = (1 << bt->bits_per_word);
552
553		/*
554		 * If the tag space is small, shrink the number of tags
555		 * per word so we spread over a few cachelines, at least.
556		 * If less than 4 tags, just forget about it, it's not
557		 * going to work optimally anyway.
558		 */
559		if (depth >= 4) {
560			while (tags_per_word * 4 > depth) {
561				bt->bits_per_word--;
562				tags_per_word = (1 << bt->bits_per_word);
563			}
564		}
565
566		nr = ALIGN(depth, tags_per_word) / tags_per_word;
567		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
568						GFP_KERNEL, node);
569		if (!bt->map)
570			return -ENOMEM;
571
572		bt->map_nr = nr;
573	}
574
575	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
576	if (!bt->bs) {
577		kfree(bt->map);
578		bt->map = NULL;
579		return -ENOMEM;
580	}
581
582	bt_update_count(bt, depth);
583
584	for (i = 0; i < BT_WAIT_QUEUES; i++) {
585		init_waitqueue_head(&bt->bs[i].wait);
586		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
587	}
588
589	return 0;
590}
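
To make the sizing in bt_alloc() concrete: on a 64-bit kernel with depth = 32, bits_per_word shrinks from 6 to 3, giving 4 words of 8 tags spread over separate cachelines. A standalone sketch of that loop (illustrative, not kernel code):

/* Same shrink loop as bt_alloc(), restated for a 64-bit kernel. */
static unsigned int example_bits_per_word(unsigned int depth)
{
	unsigned int bits_per_word = 6;			/* ilog2(BITS_PER_LONG) */
	unsigned int tags_per_word = 1U << bits_per_word;

	if (depth >= 4) {
		while (tags_per_word * 4 > depth) {
			bits_per_word--;
			tags_per_word = 1U << bits_per_word;
		}
	}

	return bits_per_word;	/* depth = 32 -> 3, i.e. 4 words of 8 tags */
}
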
591
592static void bt_free(struct blk_mq_bitmap_tags *bt)
593{
594	kfree(bt->map);
595	kfree(bt->bs);
596}
597
598static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
599						   int node, int alloc_policy)
600{
601	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
602
603	tags->alloc_policy = alloc_policy;
604
605	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
606		goto enomem;
607	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
608		goto enomem;
609
610	return tags;
611enomem:
612	bt_free(&tags->bitmap_tags);
613	kfree(tags);
614	return NULL;
615}
616
617struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
618				     unsigned int reserved_tags,
619				     int node, int alloc_policy)
620{
621	struct blk_mq_tags *tags;
622
623	if (total_tags > BLK_MQ_TAG_MAX) {
624		pr_err("blk-mq: tag depth too large\n");
625		return NULL;
626	}
627
628	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
629	if (!tags)
630		return NULL;
631
632	if (!zalloc_cpumask_var(&tags->cpumask, GFP_KERNEL)) {
633		kfree(tags);
634		return NULL;
635	}
636
637	tags->nr_tags = total_tags;
638	tags->nr_reserved_tags = reserved_tags;
639
640	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
641}
642
643void blk_mq_free_tags(struct blk_mq_tags *tags)
644{
645	bt_free(&tags->bitmap_tags);
646	bt_free(&tags->breserved_tags);
647	free_cpumask_var(tags->cpumask);
648	kfree(tags);
649}
650
651void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
652{
653	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
654
655	*tag = prandom_u32() % depth;
656}
657
658int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
659{
660	tdepth -= tags->nr_reserved_tags;
661	if (tdepth > tags->nr_tags)
662		return -EINVAL;
663
664	/*
665	 * Don't need (or can't) update reserved tags here, they remain
666	 * static and should never need resizing.
667	 */
668	bt_update_count(&tags->bitmap_tags, tdepth);
669	blk_mq_tag_wakeup_all(tags, false);
670	return 0;
671}
672
673/**
674 * blk_mq_unique_tag() - return a tag that is unique queue-wide
675 * @rq: request for which to compute a unique tag
676 *
677 * The tag field in struct request is unique per hardware queue but not over
678 * all hardware queues. Hence this function that returns a tag with the
679 * hardware context index in the upper bits and the per hardware queue tag in
680 * the lower bits.
681 *
682 * Note: When called for a request that is queued on a non-multiqueue request
683 * queue, the hardware context index is set to zero.
684 */
685u32 blk_mq_unique_tag(struct request *rq)
686{
687	struct request_queue *q = rq->q;
688	struct blk_mq_hw_ctx *hctx;
689	int hwq = 0;
690
691	if (q->mq_ops) {
692		hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
693		hwq = hctx->queue_num;
694	}
695
696	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
697		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
698}
699EXPORT_SYMBOL(blk_mq_unique_tag);
700
701ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
702{
703	char *orig_page = page;
704	unsigned int free, res;
705
706	if (!tags)
707		return 0;
708
709	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
710			"bits_per_word=%u\n",
711			tags->nr_tags, tags->nr_reserved_tags,
712			tags->bitmap_tags.bits_per_word);
713
714	free = bt_unused_tags(&tags->bitmap_tags);
715	res = bt_unused_tags(&tags->breserved_tags);
716
717	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
718	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
719
720	return page - orig_page;
721}
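
Given the sprintf() formats above, the sysfs attribute reads back three lines of this shape (the numbers are illustrative, for an idle map of 128 tags with one reserved tag, so the bitmap_tags depth is 127 and bt_alloc() settles on bits_per_word = 4):

nr_tags=128, reserved_tags=1, bits_per_word=4
nr_free=127, nr_reserved=1
active_queues=0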