/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H

#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/crc64.h>

struct closure;

#ifdef CONFIG_BCACHE_DEBUG

#define EBUG_ON(cond)		BUG_ON(cond)
#define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)

#else /* DEBUG */

#define EBUG_ON(cond)		do { if (cond) do {} while (0); } while (0)
#define atomic_dec_bug(v)	atomic_dec(v)
#define atomic_inc_bug(v, i)	atomic_inc(v)

#endif

#define DECLARE_HEAP(type, name)					\
	struct {							\
		size_t size, used;					\
		type *data;						\
	} name

#define init_heap(heap, _size, gfp)					\
({									\
	size_t _bytes;							\
	(heap)->used = 0;						\
	(heap)->size = (_size);						\
	_bytes = (heap)->size * sizeof(*(heap)->data);			\
	(heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);		\
	(heap)->data;							\
})

#define free_heap(heap)							\
do {									\
	kvfree((heap)->data);						\
	(heap)->data = NULL;						\
} while (0)

#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])

#define heap_sift(h, i, cmp)						\
do {									\
	size_t _r, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _r) {			\
		_r = _j * 2 + 1;					\
		if (_r + 1 < (h)->used &&				\
		    cmp((h)->data[_r], (h)->data[_r + 1]))		\
			_r++;						\
									\
		if (cmp((h)->data[_r], (h)->data[_j]))			\
			break;						\
		heap_swap(h, _r, _j);					\
	}								\
} while (0)

#define heap_sift_down(h, i, cmp)					\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;					\
		if (cmp((h)->data[i], (h)->data[p]))			\
			break;						\
		heap_swap(h, i, p);					\
		i = p;							\
	}								\
} while (0)

#define heap_add(h, d, cmp)						\
({									\
	bool _r = !heap_full(h);					\
	if (_r) {							\
		size_t _i = (h)->used++;				\
		(h)->data[_i] = d;					\
									\
		heap_sift_down(h, _i, cmp);				\
		heap_sift(h, _i, cmp);					\
	}								\
	_r;								\
})

#define heap_pop(h, d, cmp)						\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		(h)->used--;						\
		heap_swap(h, 0, (h)->used);				\
		heap_sift(h, 0, cmp);					\
	}								\
	_r;								\
})

#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)

#define heap_full(h)	((h)->used == (h)->size)
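
/*
 * Illustrative sketch (not part of the original header): with a
 * comparison macro like cmp(l, r) -> ((l) < (r)), these macros
 * maintain a max-heap, so heap_pop() removes the root, i.e. the
 * largest element. All names below are hypothetical:
 *
 *	#define example_cmp(l, r)	((l) < (r))
 *
 *	DECLARE_HEAP(int, example);
 *	int v;
 *
 *	if (!init_heap(&example, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *	heap_add(&example, 3, example_cmp);
 *	heap_add(&example, 7, example_cmp);
 *	heap_pop(&example, v, example_cmp);	v is now 7
 *	free_heap(&example);
 */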

#define DECLARE_FIFO(type, name)					\
	struct {							\
		size_t front, back, size, mask;				\
		type *data;						\
	} name

#define fifo_for_each(c, fifo, iter)					\
	for (iter = (fifo)->front;					\
	     c = (fifo)->data[iter], iter != (fifo)->back;		\
	     iter = (iter + 1) & (fifo)->mask)

#define __init_fifo(fifo, gfp)						\
({									\
	size_t _allocated_size, _bytes;					\
	BUG_ON(!(fifo)->size);						\
									\
	_allocated_size = roundup_pow_of_two((fifo)->size + 1);	\
	_bytes = _allocated_size * sizeof(*(fifo)->data);		\
									\
	(fifo)->mask = _allocated_size - 1;				\
	(fifo)->front = (fifo)->back = 0;				\
									\
	(fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);		\
	(fifo)->data;							\
})

#define init_fifo_exact(fifo, _size, gfp)				\
({									\
	(fifo)->size = (_size);						\
	__init_fifo(fifo, gfp);						\
})

#define init_fifo(fifo, _size, gfp)					\
({									\
	(fifo)->size = (_size);						\
	if ((fifo)->size > 4)						\
		(fifo)->size = roundup_pow_of_two((fifo)->size) - 1;	\
	__init_fifo(fifo, gfp);						\
})

#define free_fifo(fifo)							\
do {									\
	kvfree((fifo)->data);						\
	(fifo)->data = NULL;						\
} while (0)

#define fifo_used(fifo)		(((fifo)->back - (fifo)->front) & (fifo)->mask)
#define fifo_free(fifo)		((fifo)->size - fifo_used(fifo))

#define fifo_empty(fifo)	(!fifo_used(fifo))
#define fifo_full(fifo)		(!fifo_free(fifo))

#define fifo_front(fifo)	((fifo)->data[(fifo)->front])
#define fifo_back(fifo)							\
	((fifo)->data[((fifo)->back - 1) & (fifo)->mask])

#define fifo_idx(fifo, p)	(((p) - &fifo_front(fifo)) & (fifo)->mask)

#define fifo_push_back(fifo, i)						\
({									\
	bool _r = !fifo_full((fifo));					\
	if (_r) {							\
		(fifo)->data[(fifo)->back++] = (i);			\
		(fifo)->back &= (fifo)->mask;				\
	}								\
	_r;								\
})

#define fifo_pop_front(fifo, i)						\
({									\
	bool _r = !fifo_empty((fifo));					\
	if (_r) {							\
		(i) = (fifo)->data[(fifo)->front++];			\
		(fifo)->front &= (fifo)->mask;				\
	}								\
	_r;								\
})

#define fifo_push_front(fifo, i)					\
({									\
	bool _r = !fifo_full((fifo));					\
	if (_r) {							\
		--(fifo)->front;					\
		(fifo)->front &= (fifo)->mask;				\
		(fifo)->data[(fifo)->front] = (i);			\
	}								\
	_r;								\
})

#define fifo_pop_back(fifo, i)						\
({									\
	bool _r = !fifo_empty((fifo));					\
	if (_r) {							\
		--(fifo)->back;						\
		(fifo)->back &= (fifo)->mask;				\
		(i) = (fifo)->data[(fifo)->back];			\
	}								\
	_r;								\
})

#define fifo_push(fifo, i)	fifo_push_back(fifo, (i))
#define fifo_pop(fifo, i)	fifo_pop_front(fifo, (i))

#define fifo_swap(l, r)							\
do {									\
	swap((l)->front, (r)->front);					\
	swap((l)->back, (r)->back);					\
	swap((l)->size, (r)->size);					\
	swap((l)->mask, (r)->mask);					\
	swap((l)->data, (r)->data);					\
} while (0)

#define fifo_move(dest, src)						\
do {									\
	typeof(*((dest)->data)) _t;					\
	while (!fifo_full(dest) &&					\
	       fifo_pop(src, _t))					\
		fifo_push(dest, _t);					\
} while (0)
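
/*
 * Illustrative sketch (not part of the original header): the fifo is a
 * ring buffer whose allocation is rounded up to a power of two so that
 * index wrap-around is a single mask. push/pop return false when the
 * fifo is full/empty. Names below are hypothetical:
 *
 *	DECLARE_FIFO(unsigned int, example_fifo);
 *	unsigned int bucket;
 *
 *	if (!init_fifo(&example_fifo, 512, GFP_KERNEL))
 *		return -ENOMEM;
 *	fifo_push(&example_fifo, 42);
 *	while (fifo_pop(&example_fifo, bucket))
 *		pr_info("bucket %u\n", bucket);
 *	free_fifo(&example_fifo);
 */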

/*
 * Simple array based allocator - preallocates a number of elements and you can
 * never allocate more than that, also has no locking.
 *
 * Handy because if you know you only need a fixed number of elements you don't
 * have to worry about memory allocation failure, and sometimes a mempool isn't
 * what you want.
 *
 * We treat the free elements as entries in a singly linked list, and the
 * freelist as a stack - allocating and freeing push and pop off the freelist.
 *
 * A brief illustrative usage sketch follows the macros below.
 */

#define DECLARE_ARRAY_ALLOCATOR(type, name, size)			\
	struct {							\
		type *freelist;						\
		type data[size];					\
	} name

#define array_alloc(array)						\
({									\
	typeof((array)->freelist) _ret = (array)->freelist;		\
									\
	if (_ret)							\
		(array)->freelist = *((typeof((array)->freelist) *) _ret);\
									\
	_ret;								\
})

#define array_free(array, ptr)						\
do {									\
	typeof((array)->freelist) _ptr = ptr;				\
									\
	*((typeof((array)->freelist) *) _ptr) = (array)->freelist;	\
	(array)->freelist = _ptr;					\
} while (0)

#define array_allocator_init(array)					\
do {									\
	typeof((array)->freelist) _i;					\
									\
	BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *));	\
	(array)->freelist = NULL;					\
									\
	for (_i = (array)->data;					\
	     _i < (array)->data + ARRAY_SIZE((array)->data);		\
	     _i++)							\
		array_free(array, _i);					\
} while (0)

#define array_freelist_empty(array)	((array)->freelist == NULL)
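
/*
 * Usage sketch for the array allocator (illustrative only; all names
 * are hypothetical). The element type must be at least pointer-sized,
 * because a free element stores the freelist link in-place - which is
 * what the BUILD_BUG_ON() in array_allocator_init() enforces:
 *
 *	DECLARE_ARRAY_ALLOCATOR(struct bio_vec, example_pool, 16);
 *	struct bio_vec *bv;
 *
 *	array_allocator_init(&example_pool);
 *	bv = array_alloc(&example_pool);
 *	if (bv) {
 *		... use bv ...
 *		array_free(&example_pool, bv);
 *	}
 */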

#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

int bch_strtoint_h(const char *cp, int *res);
int bch_strtouint_h(const char *cp, unsigned int *res);
int bch_strtoll_h(const char *cp, long long *res);
int bch_strtoull_h(const char *cp, unsigned long long *res);

static inline int bch_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtoint_h(cp, (int *) res);
#else
	return bch_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtouint_h(cp, (unsigned int *) res);
#else
	return bch_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res)						\
	(__builtin_types_compatible_p(typeof(*res), int)		\
	? bch_strtoint_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), long)		\
	? bch_strtol_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), long long)	\
	? bch_strtoll_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), unsigned int)	\
	? bch_strtouint_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), unsigned long)	\
	? bch_strtoul_h(cp, (void *) res)				\
	: __builtin_types_compatible_p(typeof(*res), unsigned long long)\
	? bch_strtoull_h(cp, (void *) res) : -EINVAL)

#define strtoul_safe(cp, var)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
	_r;								\
})

#define strtoul_safe_clamp(cp, var, min, max)				\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
	_r;								\
})
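
/*
 * Illustrative sketch (not part of the original header): strtoi_h()
 * dispatches on the type of *res at compile time, so a single macro
 * parses sysfs input into differently typed fields via the
 * human-readable bch_strto*_h helpers. A hypothetical store handler:
 *
 *	unsigned int threshold;
 *
 *	if (strtoi_h(buf, &threshold))
 *		return -EINVAL;
 */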

ssize_t bch_hprint(char *buf, int64_t v);

bool bch_is_zero(const char *p, size_t n);
int bch_parse_uuid(const char *s, char *uuid);

struct time_stats {
	spinlock_t	lock;
	/*
	 * all fields are in nanoseconds, averages are ewmas stored left shifted
	 * by 8
	 */
	uint64_t	max_duration;
	uint64_t	average_duration;
	uint64_t	average_frequency;
	uint64_t	last;
};

void bch_time_stats_update(struct time_stats *stats, uint64_t time);

static inline unsigned int local_clock_us(void)
{
	/* local_clock() is in ns; >> 10 approximates a divide by 1000 */
	return local_clock() >> 10;
}

#define NSEC_PER_ns			1L
#define NSEC_PER_us			NSEC_PER_USEC
#define NSEC_PER_ms			NSEC_PER_MSEC
#define NSEC_PER_sec			NSEC_PER_SEC

#define __print_time_stat(stats, name, stat, units)			\
	sysfs_print(name ## _ ## stat ## _ ## units,			\
		    div_u64((stats)->stat >> 8, NSEC_PER_ ## units))

#define sysfs_print_time_stats(stats, name,				\
			       frequency_units,				\
			       duration_units)				\
do {									\
	__print_time_stat(stats, name,					\
			  average_frequency, frequency_units);		\
	__print_time_stat(stats, name,					\
			  average_duration, duration_units);		\
	sysfs_print(name ## _ ## max_duration ## _ ## duration_units,	\
			div_u64((stats)->max_duration,			\
				NSEC_PER_ ## duration_units));		\
									\
	sysfs_print(name ## _last_ ## frequency_units, (stats)->last	\
		    ? div_s64(local_clock() - (stats)->last,		\
			      NSEC_PER_ ## frequency_units)		\
		    : -1LL);						\
} while (0)

#define sysfs_time_stats_attribute(name,				\
				   frequency_units,			\
				   duration_units)			\
read_attribute(name ## _average_frequency_ ## frequency_units);		\
read_attribute(name ## _average_duration_ ## duration_units);		\
read_attribute(name ## _max_duration_ ## duration_units);		\
read_attribute(name ## _last_ ## frequency_units)

#define sysfs_time_stats_attribute_list(name,				\
					frequency_units,		\
					duration_units)			\
&sysfs_ ## name ## _average_frequency_ ## frequency_units,		\
&sysfs_ ## name ## _average_duration_ ## duration_units,		\
&sysfs_ ## name ## _max_duration_ ## duration_units,			\
&sysfs_ ## name ## _last_ ## frequency_units,

#define ewma_add(ewma, val, weight, factor)				\
({									\
	(ewma) *= (weight) - 1;						\
	(ewma) += (val) << factor;					\
	(ewma) /= (weight);						\
	(ewma) >> factor;						\
})
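
/*
 * Worked example for ewma_add() (illustrative): each call computes
 * ewma = (ewma * (weight - 1) + (val << factor)) / weight, i.e. it
 * moves the average 1/weight of the way toward val. With weight 8,
 * factor 0, starting from ewma = 0 and repeatedly feeding val = 80:
 *
 *	0 -> 10 -> 18 -> 25 -> ...  (converging on 80)
 *
 * factor adds fixed-point fraction bits so small inputs aren't rounded
 * away: the stored ewma keeps the extra precision, while the macro's
 * result is shifted back down by factor.
 */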

struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	uint64_t		next;

	/*
	 * Rate at which we want to do work, in units per second
	 * The units here correspond to the units passed to bch_next_delay()
	 */
	atomic_long_t		rate;
};

static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);

#define __DIV_SAFE(n, d, zero)						\
({									\
	typeof(n) _n = (n);						\
	typeof(d) _d = (d);						\
	_d ? _n / _d : zero;						\
})

#define DIV_SAFE(n, d)		__DIV_SAFE(n, d, 0)

#define container_of_or_null(ptr, type, member)			\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

#define RB_INSERT(root, new, member, cmp)				\
({									\
	__label__ dup;							\
	struct rb_node **n = &(root)->rb_node, *parent = NULL;		\
	typeof(new) this;						\
	int res, ret = -1;						\
									\
	while (*n) {							\
		parent = *n;						\
		this = container_of(*n, typeof(*(new)), member);	\
		res = cmp(new, this);					\
		if (!res)						\
			goto dup;					\
		n = res < 0						\
			? &(*n)->rb_left				\
			: &(*n)->rb_right;				\
	}								\
									\
	rb_link_node(&(new)->member, parent, n);			\
	rb_insert_color(&(new)->member, root);				\
	ret = 0;							\
dup:									\
	ret;								\
})

#define RB_SEARCH(root, search, member, cmp)				\
({									\
	struct rb_node *n = (root)->rb_node;				\
	typeof(&(search)) this, ret = NULL;				\
	int res;							\
									\
	while (n) {							\
		this = container_of(n, typeof(search), member);		\
		res = cmp(&(search), this);				\
		if (!res) {						\
			ret = this;					\
			break;						\
		}							\
		n = res < 0						\
			? n->rb_left					\
			: n->rb_right;					\
	}								\
	ret;								\
})

#define RB_GREATER(root, search, member, cmp)				\
({									\
	struct rb_node *n = (root)->rb_node;				\
	typeof(&(search)) this, ret = NULL;				\
	int res;							\
									\
	while (n) {							\
		this = container_of(n, typeof(search), member);		\
		res = cmp(&(search), this);				\
		if (res < 0) {						\
			ret = this;					\
			n = n->rb_left;					\
		} else							\
			n = n->rb_right;				\
	}								\
	ret;								\
})

#define RB_FIRST(root, type, member)					\
	container_of_or_null(rb_first(root), type, member)

#define RB_LAST(root, type, member)					\
	container_of_or_null(rb_last(root), type, member)

#define RB_NEXT(ptr, member)						\
	container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)

#define RB_PREV(ptr, member)						\
	container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
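
/*
 * Illustrative sketch (not part of the original header): the RB_*
 * macros wrap <linux/rbtree.h> with a container_of()-style interface;
 * cmp() returns <0, 0 or >0, memcmp()-style. Names are hypothetical:
 *
 *	struct example {
 *		struct rb_node	node;
 *		u64		key;
 *	};
 *
 *	static int example_cmp(struct example *l, struct example *r)
 *	{
 *		return l->key < r->key ? -1 : l->key > r->key;
 *	}
 *
 *	RB_INSERT(&root, e, node, example_cmp) returns 0 on success and
 *	-1 when an entry with an equal key is already in the tree, in
 *	which case e was not inserted.
 */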

static inline uint64_t bch_crc64(const void *p, size_t len)
{
	uint64_t crc = 0xffffffffffffffffULL;

	crc = crc64_be(crc, p, len);
	return crc ^ 0xffffffffffffffffULL;
}

/*
 * A stepwise-linear pseudo-exponential. This returns 1 << (x >>
 * fract_bits), with the less-significant bits filled in by linear
 * interpolation.
 *
 * This can also be interpreted as a floating-point number format,
 * where the low fract_bits are the mantissa (with implicit leading
 * 1 bit), and the more significant bits are the exponent.
 * The return value is 1.mantissa * 2^exponent.
 *
 * The way this is used, fract_bits is 6 and the largest possible
 * input is CONGESTED_MAX-1 = 1023 (exponent 15, mantissa 0x1.fc),
 * so the maximum output is 0xfe00.
 */
static inline unsigned int fract_exp_two(unsigned int x,
					 unsigned int fract_bits)
{
	unsigned int mantissa = 1 << fract_bits;	/* Implicit bit */

	mantissa += x & (mantissa - 1);
	x >>= fract_bits;	/* The exponent */
	/* Largest intermediate value 0x3f8000 */
	return mantissa << x >> fract_bits;
}
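
/*
 * Worked example (illustrative): fract_exp_two(70, 6):
 *	mantissa = 64 + (70 & 63) = 70	(1.09375 in 1.6 fixed point)
 *	exponent = 70 >> 6        = 1
 *	result   = 70 << 1 >> 6   = 2	(1.09375 * 2^1, truncated)
 */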

void bch_bio_map(struct bio *bio, void *base);
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);

#endif /* _BCACHE_UTIL_H */