lib/sbitmap.c, Linux v4.10.11

/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/random.h>
#include <linux/sbitmap.h>

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
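	/*
	 * For example, on a 64-bit machine shift starts at ilog2(64) == 6;
	 * with depth == 40 the loop above shrinks it to 3 (4U << 3 == 32 <=
	 * 40), so bits_per_word == 8 and the map spreads the 40 bits over
	 * DIV_ROUND_UP(40, 8) == 5 words.
	 */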
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
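
/*
 * A minimal usage sketch (not part of this file): allocate a 128-bit map
 * with the default word size, grab a free bit, and release everything.
 * The surrounding driver context and error handling are assumed.
 */
static int sbitmap_example(void)
{
	struct sbitmap sb;
	int nr;

	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE))
		return -ENOMEM;

	nr = sbitmap_get(&sb, 0, false);	/* returns -1 if the map is full */
	if (nr >= 0)
		sbitmap_clear_bit(&sb, nr);

	sbitmap_free(&sb);
	return 0;
}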

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
			      bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(&word->word, word->depth, hint);
		if (unlikely(nr >= word->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(nr, &word->word))
			break;

		hint = nr + 1;
		if (hint >= word->depth - 1)
			hint = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);
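
	/*
	 * For example, with shift == 6 (64 bits per word) an alloc_hint of
	 * 200 maps to word index 200 >> 6 == 3 and bit 200 & 63 == 8 within
	 * that word.
	 */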
	for (i = 0; i < sb->map_nr; i++) {
		nr = __sbitmap_get_word(&sb->map[index],
					SB_NR_TO_BIT(sb, alloc_hint),
					!round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long ret;

		ret = find_first_zero_bit(&word->word, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		weight += bitmap_weight(&word->word, word->depth);
	}
	return weight;
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

static unsigned int sbq_calc_wake_batch(unsigned int depth)
{
	unsigned int wake_batch;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap is
	 * enough to wake up all of the queues.
	 */
	wake_batch = SBQ_WAKE_BATCH;
	if (wake_batch > depth / SBQ_WAIT_QUEUES)
		wake_batch = max(1U, depth / SBQ_WAIT_QUEUES);
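	/*
	 * For example, with SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8, a
	 * depth of 256 keeps the full batch of 8, while a depth of 32
	 * shrinks it to 32 / 8 == 4 so all eight queues can still be woken.
	 */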

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->wake_batch = sbq_calc_wake_batch(depth);
	atomic_set(&sbq->wake_index, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
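
/*
 * A minimal sketch of the queue variant (not part of this file): callers
 * that may need to sleep use a struct sbitmap_queue so that freeing a bit
 * can wake a waiter. Context and error handling are assumed.
 */
static int sbitmap_queue_example(void)
{
	struct sbitmap_queue sbq;
	int nr;

	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
				    NUMA_NO_NODE))
		return -ENOMEM;

	nr = __sbitmap_queue_get(&sbq);		/* -1 if no bit is free */
	if (nr >= 0)
		sbitmap_queue_clear(&sbq, nr, raw_smp_processor_id());

	sbitmap_queue_free(&sbq);
	return 0;
}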

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbq->wake_batch = sbq_calc_wake_batch(depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static void sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	int wait_cnt;

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&ws->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(sbq->wake_batch, &ws->wait_cnt);
		sbq_index_atomic_inc(&sbq->wake_index);
		wake_up(&ws->wait);
	}
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	sbitmap_clear_bit(&sbq->sb, nr);
	sbq_wake_up(sbq);
	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Make sure all changes prior to this are visible from other CPUs.
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

lib/sbitmap.c, Linux v5.9

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&sb->map[index].swap_lock, flags);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&sb->map[index].cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
	return ret;
}
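
/*
 * The bits folded back in here are set by sbitmap_deferred_clear_bit()
 * on the free path (see sbitmap_queue_clear() below): freeing only ORs a
 * bit into ->cleared, so the hot ->word cacheline is rewritten only once
 * an allocator finds that word full.
 */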

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
106
107static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
108			      unsigned int hint, bool wrap)
109{
110	unsigned int orig_hint = hint;
111	int nr;
112
113	while (1) {
114		nr = find_next_zero_bit(word, depth, hint);
115		if (unlikely(nr >= depth)) {
116			/*
117			 * We started with an offset, and we didn't reset the
118			 * offset to 0 in a failure case, so start from 0 to
119			 * exhaust the map.
120			 */
121			if (orig_hint && hint && wrap) {
122				hint = orig_hint = 0;
123				continue;
124			}
125			return -1;
126		}
127
128		if (!test_and_set_bit_lock(nr, word))
129			break;
130
131		hint = nr + 1;
132		if (hint >= depth - 1)
133			hint = 0;
134	}
135
136	return nr;
137}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
						round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
191
192int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
193			unsigned long shallow_depth)
194{
195	unsigned int i, index;
196	int nr = -1;
197
198	index = SB_NR_TO_INDEX(sb, alloc_hint);
199
200	for (i = 0; i < sb->map_nr; i++) {
201again:
202		nr = __sbitmap_get_word(&sb->map[index].word,
203					min(sb->map[index].depth, shallow_depth),
204					SB_NR_TO_BIT(sb, alloc_hint), true);
205		if (nr != -1) {
206			nr += index << sb->shift;
207			break;
208		}
209
210		if (sbitmap_deferred_clear(sb, index))
211			goto again;
212
213		/* Jump to next index. */
214		index++;
215		alloc_hint = index << sb->shift;
216
217		if (index >= sb->map_nr) {
218			index = 0;
219			alloc_hint = 0;
220		}
221	}
222
223	return nr;
224}
225EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
226
227bool sbitmap_any_bit_set(const struct sbitmap *sb)
228{
229	unsigned int i;
230
231	for (i = 0; i < sb->map_nr; i++) {
232		if (sb->map[i].word & ~sb->map[i].cleared)
233			return true;
234	}
235	return false;
236}
237EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
238
239static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
240{
241	unsigned int i, weight = 0;
242
243	for (i = 0; i < sb->map_nr; i++) {
244		const struct sbitmap_word *word = &sb->map[i];
 
245
246		if (set)
247			weight += bitmap_weight(&word->word, word->depth);
248		else
249			weight += bitmap_weight(&word->cleared, word->depth);
250	}
251	return weight;
252}
 
253
254static unsigned int sbitmap_weight(const struct sbitmap *sb)
255{
256	return __sbitmap_weight(sb, true);
257}
258
259static unsigned int sbitmap_cleared(const struct sbitmap *sb)
260{
261	return __sbitmap_weight(sb, false);
262}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);
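
/*
 * Sample debugfs output (illustrative numbers): a 128-bit map with three
 * busy bits and one deferred clear would show as
 *
 *	depth=128
 *	busy=3
 *	cleared=1
 *	bits_per_word=64
 *	map_nr=2
 */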

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}
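
/*
 * Worked example: a 16-bit map with bits 0 and 9 set (and nothing
 * cleared) emits two bytes, 0x01 and 0x02, so sbitmap_bitmap_show()
 * prints "00000000: 0102".
 */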
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);
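	/*
	 * For example, with shift == 6, depth == 256 and min_shallow_depth
	 * == 16: shallow_depth == 16, the usable depth becomes
	 * (256 >> 6) * 16 + 0 == 64, and wake_batch == clamp(64 / 8, 1, 8)
	 * == 8.
	 */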

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);
482
483void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
484				     unsigned int min_shallow_depth)
485{
486	sbq->min_shallow_depth = min_shallow_depth;
487	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
488}
489EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
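
/*
 * A usage sketch (not part of this file): a caller that throttles
 * low-priority allocations to at most 16 bits per word must announce the
 * limit before the first shallow get, so the wake batch is sized for it.
 * The value 16 is illustrative.
 */
static int sbitmap_shallow_example(struct sbitmap_queue *sbq)
{
	sbitmap_queue_min_shallow_depth(sbq, 16);
	return __sbitmap_queue_get_shallow(sbq, 16);	/* -1 if exhausted */
}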

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wakeup a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as a request
	 * in blk-mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
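
/*
 * A sketch of the waiter pattern these helpers support, loosely modeled
 * on blk_mq_get_tag(); everything outside this file is an assumption for
 * illustration. sbq_wait_ptr() spreads waiters over the wait queues.
 */
static int sbitmap_wait_example(struct sbitmap_queue *sbq, atomic_t *wait_index)
{
	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, wait_index);
	DEFINE_SBQ_WAIT(wait);
	int nr;

	do {
		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
		nr = __sbitmap_queue_get(sbq);
		if (nr >= 0)
			break;
		io_schedule();	/* woken by sbitmap_queue_clear() */
	} while (1);
	sbitmap_finish_wait(sbq, ws, &wait);

	return nr;
}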