/* SPDX-License-Identifier: GPL-2.0 */

#ifndef BTRFS_MISC_H
#define BTRFS_MISC_H

#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/rbtree.h>

#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))

static inline void cond_wake_up(struct wait_queue_head *wq)
{
	/*
	 * This implies a full smp_mb barrier, see the comments for
	 * waitqueue_active for why.
	 */
	if (wq_has_sleeper(wq))
		wake_up(wq);
}

static inline void cond_wake_up_nomb(struct wait_queue_head *wq)
{
	/*
	 * Special case for the conditional wakeup where the barrier required
	 * for waitqueue_active is implied by some of the preceding code, e.g.
	 * an atomic operation such as atomic_dec_return(), or an unlock/lock
	 * sequence.
	 */
	if (waitqueue_active(wq))
		wake_up(wq);
}
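
/*
 * Illustrative usage sketch (not part of the original header; the fields are
 * hypothetical): update the shared state first, then issue the conditional
 * wakeup, so a waiter re-checking the condition cannot miss the change.
 *
 *	atomic_dec(&fs_info->some_counter);
 *	cond_wake_up(&fs_info->some_waitq);
 *
 * cond_wake_up_nomb() would be chosen instead when the code right before it
 * already implies the barrier, e.g. atomic_dec_return() or an unlock/lock
 * pair.
 */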

/* Return num * factor / 10, i.e. @factor expresses tenths (8 means 80%). */
static inline u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	return div_u64(num, 10);
}

/* Return num * factor / 100, i.e. @factor expresses a percentage. */
static inline u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	return div_u64(num, 100);
}
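
/*
 * Illustrative values (not part of the original header), showing that both
 * helpers scale by the same ratio with different granularity:
 *
 *	div_factor(1000, 8)       returns 800 (1000 * 8 / 10)
 *	div_factor_fine(1000, 80) returns 800 (1000 * 80 / 100)
 */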

/* Copy of is_power_of_two that is 64bit safe */
static inline bool is_power_of_two_u64(u64 n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static inline bool has_single_bit_set(u64 n)
{
	return is_power_of_two_u64(n);
}
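
/*
 * Illustrative values (not part of the original header):
 *
 *	has_single_bit_set(1ULL << 40)	-> true
 *	has_single_bit_set(0)		-> false
 *	has_single_bit_set(0x6)		-> false (two bits set)
 */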

/*
 * Simple bytenr-based rb_tree related structures
 *
 * Any structure that wants to use bytenr as its single search index should
 * start with these members.
 */
struct rb_simple_node {
	struct rb_node rb_node;
	u64 bytenr;
};

/* Return the node whose bytenr equals @bytenr, or NULL if there is none. */
static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = root->rb_node;
	struct rb_simple_node *entry;

	while (node) {
		entry = rb_entry(node, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			node = node->rb_left;
		else if (bytenr > entry->bytenr)
			node = node->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Insert @node keyed by @bytenr. If an entry with the same bytenr already
 * exists, return its rb_node without inserting; return NULL on successful
 * insertion.
 */
static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_simple_node *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
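
/*
 * Illustrative usage sketch (not part of the original header; all names are
 * hypothetical): a structure indexed by bytenr embeds struct rb_simple_node
 * as its first member, so rb_entry()/container_of() on the returned rb_node
 * gives back the containing structure.
 *
 *	struct my_block_entry {
 *		struct rb_simple_node node;	// must be the first member
 *		int refs;
 *	};
 *
 *	static struct my_block_entry *my_lookup(struct rb_root *root, u64 bytenr)
 *	{
 *		struct rb_node *n = rb_simple_search(root, bytenr);
 *
 *		return n ? rb_entry(n, struct my_block_entry, node.rb_node) : NULL;
 *	}
 *
 *	// Set the key before linking; a non-NULL return means an entry with
 *	// the same bytenr already exists and @entry was not inserted.
 *	entry->node.bytenr = bytenr;
 *	exist = rb_simple_insert(root, bytenr, &entry->node.rb_node);
 */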

#endif