// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

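/*
 * Editorial note, not in the upstream file: the per-CPU hint spreads
 * allocations across the map. For example, with depth = 8 and this CPU's
 * hint at 5, successfully grabbing bit 5 advances the hint to 6, while
 * grabbing some other bit leaves the hint untouched, since the hint was
 * not actually used (unless round_robin forces strict advancement).
 */
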
/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

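/*
 * Worked example (editorial, not in the upstream file): suppose
 * map->word = 0b1111 and map->cleared = 0b0101, i.e. bits 0 and 2 were
 * freed via the deferred path. xchg() grabs mask = 0b0101 and zeroes
 * ->cleared, then atomic_long_andnot() leaves map->word = 0b1010, making
 * bits 0 and 2 allocatable again in one batched atomic update instead of
 * one atomic per freed bit.
 */
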
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

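/*
 * Usage sketch (editorial, not in the upstream file; "tags" is a
 * hypothetical name and error handling is abbreviated). A 128-bit map
 * with an auto-calculated shift, per-CPU hints, and no round-robin:
 *
 *	struct sbitmap tags;
 *	int nr;
 *
 *	if (sbitmap_init_node(&tags, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			      false, true))
 *		return -ENOMEM;
 *	nr = sbitmap_get(&tags);	(returns -1 if the map is full)
 *	if (nr >= 0)
 *		sbitmap_clear_bit(&tags, nr);
 *	sbitmap_free(&tags);
 */
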
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

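/*
 * Worked example (editorial, not in the upstream file): with depth = 8,
 * hint = 5, wrap = true, and *word = 0b11110000, the first
 * find_next_zero_bit() scan starts at bit 5 and finds nothing free below
 * depth, so the search restarts once from bit 0 and atomically claims
 * bit 0 with test_and_set_bit_lock(). If another CPU wins the race for a
 * bit, the loop resumes just past the contended bit rather than giving up.
 */
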
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
					!sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

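/*
 * Editorial note, not in the upstream file: the global bit number is
 * (word index << shift) + bit-within-word. For example, with shift = 6
 * (64 bits per word), bit 3 of word 2 is returned as (2 << 6) + 3 = 131.
 * SB_NR_TO_INDEX() and SB_NR_TO_BIT() invert this mapping.
 */
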
static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

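/*
 * Editorial note, not in the upstream file: "shallow" allocation caps how
 * far into each word the search may go. With 64-bit words and
 * shallow_depth = 4, only bits 0-3 of every word are candidates, so at
 * most 4 * map_nr bits can be outstanding via this path. Callers such as
 * I/O schedulers use this to throttle particular request classes.
 */
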
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

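/*
 * Output example (editorial, not in the upstream file): the dump mimics a
 * hex editor, 16 bytes per row, grouped two bytes at a time, with the
 * deferred ->cleared bits already masked out. A 64-bit map whose first
 * byte has bits 0 and 2 set would print:
 *
 *	00000000: 0500 0000 0000 0000
 */
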
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

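/*
 * Worked example (editorial, not in the upstream file): with depth = 256,
 * shift = 6 (64 bits per word), and min_shallow_depth = UINT_MAX, the
 * usable depth stays 256, so wake_batch = clamp(256 / 8, 1, 8) = 8. If
 * min_shallow_depth were 4, the usable depth drops to 4 words * 4 bits
 * = 16 and wake_batch = clamp(16 / 8, 1, 8) = 2.
 */
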
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

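/*
 * Usage sketch (editorial, not in the upstream file; "tag_queue" is a
 * hypothetical name and error handling is abbreviated). A 64-tag queue
 * whose producers can block when the map is exhausted:
 *
 *	struct sbitmap_queue tag_queue;
 *	int tag;
 *
 *	if (sbitmap_queue_init_node(&tag_queue, 64, -1, false,
 *				    GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 *	tag = __sbitmap_queue_get(&tag_queue);
 *	if (tag >= 0)
 *		sbitmap_queue_clear(&tag_queue, tag, raw_smp_processor_id());
 *	sbitmap_queue_free(&tag_queue);
 */
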
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated again.
	 *
	 * This orders reads and writes on the associated instance (such as
	 * a blk-mq request) guarded by this bit, to avoid racing with
	 * re-allocation; its pair is the memory barrier implied in
	 * __sbitmap_get_word().
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->sb.round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->sb.alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

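/*
 * Editorial note, not in the upstream file: freeing a bit goes through the
 * deferred ->cleared mask rather than clearing ->word directly, so the
 * atomic on ->word is batched (see sbitmap_deferred_clear()). The wake-up
 * here decrements one wait state's wait_cnt; only once a full wake_batch
 * of frees has accumulated does a batch of waiters run, keeping wakeup
 * traffic proportional to capacity rather than to every individual free.
 */
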
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
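
/*
 * Usage sketch (editorial, not in the upstream file): a blocking
 * allocation loop in the style of blk-mq. "sbq" is a hypothetical
 * initialized queue; ws[0] is used here for simplicity, whereas real
 * users rotate across wait states (see sbq_wait_ptr() in the header).
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = &sbq->ws[0];
 *	int nr;
 *
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */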