/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H

#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/crc64.h>

struct closure;

#ifdef CONFIG_BCACHE_DEBUG

#define EBUG_ON(cond)		BUG_ON(cond)
#define atomic_dec_bug(v)	BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i)	BUG_ON(atomic_inc_return(v) <= i)

#else /* DEBUG */

#define EBUG_ON(cond)		do { if (cond) do {} while (0); } while (0)
#define atomic_dec_bug(v)	atomic_dec(v)
#define atomic_inc_bug(v, i)	atomic_inc(v)

#endif

#define DECLARE_HEAP(type, name) \
	struct { \
		size_t size, used; \
		type *data; \
	} name

#define init_heap(heap, _size, gfp) \
({ \
	size_t _bytes; \
	(heap)->used = 0; \
	(heap)->size = (_size); \
	_bytes = (heap)->size * sizeof(*(heap)->data); \
	(heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
	(heap)->data; \
})

#define free_heap(heap) \
do { \
	kvfree((heap)->data); \
	(heap)->data = NULL; \
} while (0)

#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])

#define heap_sift(h, i, cmp) \
do { \
	size_t _r, _j = i; \
	\
	for (; _j * 2 + 1 < (h)->used; _j = _r) { \
		_r = _j * 2 + 1; \
		if (_r + 1 < (h)->used && \
		    cmp((h)->data[_r], (h)->data[_r + 1])) \
			_r++; \
	\
		if (cmp((h)->data[_r], (h)->data[_j])) \
			break; \
		heap_swap(h, _r, _j); \
	} \
} while (0)

#define heap_sift_down(h, i, cmp) \
do { \
	while (i) { \
		size_t p = (i - 1) / 2; \
		if (cmp((h)->data[i], (h)->data[p])) \
			break; \
		heap_swap(h, i, p); \
		i = p; \
	} \
} while (0)

#define heap_add(h, d, cmp) \
({ \
	bool _r = !heap_full(h); \
	if (_r) { \
		size_t _i = (h)->used++; \
		(h)->data[_i] = d; \
	\
		heap_sift_down(h, _i, cmp); \
		heap_sift(h, _i, cmp); \
	} \
	_r; \
})

#define heap_pop(h, d, cmp) \
({ \
	bool _r = (h)->used; \
	if (_r) { \
		(d) = (h)->data[0]; \
		(h)->used--; \
		heap_swap(h, 0, (h)->used); \
		heap_sift(h, 0, cmp); \
	} \
	_r; \
})

#define heap_peek(h)	((h)->used ? (h)->data[0] : NULL)

#define heap_full(h)	((h)->used == (h)->size)
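
/*
 * Illustrative sketch of the heap API above (not upstream code; "todo"
 * and cmp_lt() are hypothetical).  Note the comparison convention: the
 * heap invariant is cmp(child, parent), so with cmp(l, r) == (l < r) the
 * root is the largest element and heap_pop() yields descending order:
 *
 *	DECLARE_HEAP(int, todo);
 *	int v;
 *
 *	#define cmp_lt(l, r)	((l) < (r))
 *
 *	if (!init_heap(&todo, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *	heap_add(&todo, 42, cmp_lt);		// false once heap_full()
 *	while (heap_pop(&todo, v, cmp_lt))	// largest first
 *		pr_info("%i\n", v);
 *	free_heap(&todo);
 */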

#define DECLARE_FIFO(type, name) \
	struct { \
		size_t front, back, size, mask; \
		type *data; \
	} name

#define fifo_for_each(c, fifo, iter) \
	for (iter = (fifo)->front; \
	     c = (fifo)->data[iter], iter != (fifo)->back; \
	     iter = (iter + 1) & (fifo)->mask)

#define __init_fifo(fifo, gfp) \
({ \
	size_t _allocated_size, _bytes; \
	BUG_ON(!(fifo)->size); \
	\
	_allocated_size = roundup_pow_of_two((fifo)->size + 1); \
	_bytes = _allocated_size * sizeof(*(fifo)->data); \
	\
	(fifo)->mask = _allocated_size - 1; \
	(fifo)->front = (fifo)->back = 0; \
	\
	(fifo)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \
	(fifo)->data; \
})

#define init_fifo_exact(fifo, _size, gfp) \
({ \
	(fifo)->size = (_size); \
	__init_fifo(fifo, gfp); \
})

#define init_fifo(fifo, _size, gfp) \
({ \
	(fifo)->size = (_size); \
	if ((fifo)->size > 4) \
		(fifo)->size = roundup_pow_of_two((fifo)->size) - 1; \
	__init_fifo(fifo, gfp); \
})

#define free_fifo(fifo) \
do { \
	kvfree((fifo)->data); \
	(fifo)->data = NULL; \
} while (0)

#define fifo_used(fifo)		(((fifo)->back - (fifo)->front) & (fifo)->mask)
#define fifo_free(fifo)		((fifo)->size - fifo_used(fifo))

#define fifo_empty(fifo)	(!fifo_used(fifo))
#define fifo_full(fifo)		(!fifo_free(fifo))

#define fifo_front(fifo)	((fifo)->data[(fifo)->front])
#define fifo_back(fifo) \
	((fifo)->data[((fifo)->back - 1) & (fifo)->mask])

#define fifo_idx(fifo, p)	(((p) - &fifo_front(fifo)) & (fifo)->mask)

#define fifo_push_back(fifo, i) \
({ \
	bool _r = !fifo_full((fifo)); \
	if (_r) { \
		(fifo)->data[(fifo)->back++] = (i); \
		(fifo)->back &= (fifo)->mask; \
	} \
	_r; \
})

#define fifo_pop_front(fifo, i) \
({ \
	bool _r = !fifo_empty((fifo)); \
	if (_r) { \
		(i) = (fifo)->data[(fifo)->front++]; \
		(fifo)->front &= (fifo)->mask; \
	} \
	_r; \
})

#define fifo_push_front(fifo, i) \
({ \
	bool _r = !fifo_full((fifo)); \
	if (_r) { \
		--(fifo)->front; \
		(fifo)->front &= (fifo)->mask; \
		(fifo)->data[(fifo)->front] = (i); \
	} \
	_r; \
})

#define fifo_pop_back(fifo, i) \
({ \
	bool _r = !fifo_empty((fifo)); \
	if (_r) { \
		--(fifo)->back; \
		(fifo)->back &= (fifo)->mask; \
		(i) = (fifo)->data[(fifo)->back]; \
	} \
	_r; \
})

#define fifo_push(fifo, i)	fifo_push_back(fifo, (i))
#define fifo_pop(fifo, i)	fifo_pop_front(fifo, (i))

#define fifo_swap(l, r) \
do { \
	swap((l)->front, (r)->front); \
	swap((l)->back, (r)->back); \
	swap((l)->size, (r)->size); \
	swap((l)->mask, (r)->mask); \
	swap((l)->data, (r)->data); \
} while (0)

#define fifo_move(dest, src) \
do { \
	typeof(*((dest)->data)) _t; \
	while (!fifo_full(dest) && \
	       fifo_pop(src, _t)) \
		fifo_push(dest, _t); \
} while (0)
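
/*
 * Illustrative FIFO usage (a sketch, not upstream code; "seqs" is a
 * hypothetical name).  The FIFO is a ring buffer: __init_fifo() rounds
 * the allocation up to a power of two, and indices wrap via the mask:
 *
 *	DECLARE_FIFO(uint64_t, seqs);
 *	uint64_t seq;
 *	size_t iter;
 *
 *	if (!init_fifo(&seqs, 64, GFP_KERNEL))
 *		return -ENOMEM;
 *	fifo_push(&seqs, 1);			// false when fifo_full()
 *	fifo_for_each(seq, &seqs, iter)		// front to back, no dequeue
 *		pr_info("%llu\n", seq);
 *	while (fifo_pop(&seqs, seq))		// dequeues from the front
 *		;
 *	free_fifo(&seqs);
 */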

/*
 * Simple array based allocator - preallocates a fixed number of elements,
 * and you can never allocate more than that; there's also no locking.
 *
 * Handy because if you know you only need a fixed number of elements you don't
 * have to worry about memory allocation failure, and sometimes a mempool isn't
 * what you want.
 *
 * We treat the free elements as entries in a singly linked list, and the
 * freelist as a stack - allocating and freeing push and pop off the freelist.
 */

#define DECLARE_ARRAY_ALLOCATOR(type, name, size) \
	struct { \
		type *freelist; \
		type data[size]; \
	} name

#define array_alloc(array) \
({ \
	typeof((array)->freelist) _ret = (array)->freelist; \
	\
	if (_ret) \
		(array)->freelist = *((typeof((array)->freelist) *) _ret); \
	\
	_ret; \
})

#define array_free(array, ptr) \
do { \
	typeof((array)->freelist) _ptr = ptr; \
	\
	*((typeof((array)->freelist) *) _ptr) = (array)->freelist; \
	(array)->freelist = _ptr; \
} while (0)

#define array_allocator_init(array) \
do { \
	typeof((array)->freelist) _i; \
	\
	BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *)); \
	(array)->freelist = NULL; \
	\
	for (_i = (array)->data; \
	     _i < (array)->data + ARRAY_SIZE((array)->data); \
	     _i++) \
		array_free(array, _i); \
} while (0)

#define array_freelist_empty(array)	((array)->freelist == NULL)
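
/*
 * Hypothetical usage sketch (names are illustrative, not from bcache).
 * Elements must be at least pointer-sized, since a free element's first
 * bytes are reused as the freelist link:
 *
 *	DECLARE_ARRAY_ALLOCATOR(struct io, ios, 16);
 *	struct io *io;
 *
 *	array_allocator_init(&ios);	// pushes all 16 elements on the freelist
 *	io = array_alloc(&ios);		// NULL once array_freelist_empty()
 *	if (io)
 *		array_free(&ios, io);	// pushes the element back on the freelist
 */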

#define ANYSINT_MAX(t) \
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

int bch_strtoint_h(const char *cp, int *res);
int bch_strtouint_h(const char *cp, unsigned int *res);
int bch_strtoll_h(const char *cp, long long *res);
int bch_strtoull_h(const char *cp, unsigned long long *res);

static inline int bch_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtoint_h(cp, (int *) res);
#else
	return bch_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtouint_h(cp, (unsigned int *) res);
#else
	return bch_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res) \
	(__builtin_types_compatible_p(typeof(*res), int) \
	? bch_strtoint_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), long) \
	? bch_strtol_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), long long) \
	? bch_strtoll_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), unsigned int) \
	? bch_strtouint_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), unsigned long) \
	? bch_strtoul_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), unsigned long long) \
	? bch_strtoull_h(cp, (void *) res) : -EINVAL)

#define strtoul_safe(cp, var) \
({ \
	unsigned long _v; \
	int _r = kstrtoul(cp, 10, &_v); \
	if (!_r) \
		var = _v; \
	_r; \
})

#define strtoul_safe_clamp(cp, var, min, max) \
({ \
	unsigned long _v; \
	int _r = kstrtoul(cp, 10, &_v); \
	if (!_r) \
		var = clamp_t(typeof(var), _v, min, max); \
	_r; \
})
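
/*
 * Sketch of how these parsers are typically used from sysfs store hooks
 * ("buf" is the user-supplied string; the variables are hypothetical).
 * The _h variants accept human-readable suffixes, so e.g. "64M" parses
 * as 64 << 20:
 *
 *	uint64_t bytes;
 *	unsigned int percent = 10;
 *
 *	if (strtoi_h(buf, &bytes))			// dispatches on typeof(*res)
 *		return -EINVAL;
 *	strtoul_safe_clamp(buf, percent, 0, 100);	// writes var only on success
 */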

ssize_t bch_hprint(char *buf, int64_t v);

bool bch_is_zero(const char *p, size_t n);
int bch_parse_uuid(const char *s, char *uuid);

struct time_stats {
	spinlock_t lock;
	/*
	 * all fields are in nanoseconds, averages are ewmas stored left shifted
	 * by 8
	 */
	uint64_t	max_duration;
	uint64_t	average_duration;
	uint64_t	average_frequency;
	uint64_t	last;
};

void bch_time_stats_update(struct time_stats *stats, uint64_t time);

static inline unsigned int local_clock_us(void)
{
	return local_clock() >> 10;
}

#define NSEC_PER_ns	1L
#define NSEC_PER_us	NSEC_PER_USEC
#define NSEC_PER_ms	NSEC_PER_MSEC
#define NSEC_PER_sec	NSEC_PER_SEC

#define __print_time_stat(stats, name, stat, units) \
	sysfs_print(name ## _ ## stat ## _ ## units, \
		    div_u64((stats)->stat >> 8, NSEC_PER_ ## units))

#define sysfs_print_time_stats(stats, name, \
			       frequency_units, \
			       duration_units) \
do { \
	__print_time_stat(stats, name, \
			  average_frequency, frequency_units); \
	__print_time_stat(stats, name, \
			  average_duration, duration_units); \
	sysfs_print(name ## _ ## max_duration ## _ ## duration_units, \
		    div_u64((stats)->max_duration, \
			    NSEC_PER_ ## duration_units)); \
	\
	sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
		    ? div_s64(local_clock() - (stats)->last, \
			      NSEC_PER_ ## frequency_units) \
		    : -1LL); \
} while (0)

#define sysfs_time_stats_attribute(name, \
				   frequency_units, \
				   duration_units) \
read_attribute(name ## _average_frequency_ ## frequency_units); \
read_attribute(name ## _average_duration_ ## duration_units); \
read_attribute(name ## _max_duration_ ## duration_units); \
read_attribute(name ## _last_ ## frequency_units)

#define sysfs_time_stats_attribute_list(name, \
					frequency_units, \
					duration_units) \
&sysfs_ ## name ## _average_frequency_ ## frequency_units, \
&sysfs_ ## name ## _average_duration_ ## duration_units, \
&sysfs_ ## name ## _max_duration_ ## duration_units, \
&sysfs_ ## name ## _last_ ## frequency_units,
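
/*
 * These pair with the read_attribute()/sysfs_print() helpers in bcache's
 * sysfs.h; e.g. the btree GC stats are exposed roughly like this (sketch):
 *
 *	sysfs_time_stats_attribute(btree_gc, sec, ms);
 *	...
 *	sysfs_print_time_stats(&c->btree_gc_time, btree_gc, sec, ms);
 */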

#define ewma_add(ewma, val, weight, factor) \
({ \
	(ewma) *= (weight) - 1; \
	(ewma) += (val) << factor; \
	(ewma) /= (weight); \
	(ewma) >> factor; \
})
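
/*
 * ewma_add() keeps the average in fixed point, scaled up by 2^factor, and
 * evaluates to the unscaled average.  E.g. with weight 8 and factor 8, one
 * step from a stored 0 toward val = 100 leaves the state at
 * (0 * 7 + (100 << 8)) / 8 = 3200 and returns 3200 >> 8 = 12.
 */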

struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	uint64_t	next;

	/*
	 * Rate at which we want to do work, in units per second
	 * The units here correspond to the units passed to bch_next_delay()
	 */
	atomic_long_t	rate;
};

static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);
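
/*
 * Typical pattern (a sketch; the names are illustrative): after completing
 * "done" units of work, bch_next_delay() returns how many nanoseconds to
 * wait so the long-run rate converges on d->rate units per second:
 *
 *	uint64_t delay = bch_next_delay(&dc->writeback_rate, sectors);
 *
 *	if (delay)
 *		schedule_timeout_interruptible(nsecs_to_jiffies(delay));
 */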

#define __DIV_SAFE(n, d, zero) \
({ \
	typeof(n) _n = (n); \
	typeof(d) _d = (d); \
	_d ? _n / _d : zero; \
})

#define DIV_SAFE(n, d)	__DIV_SAFE(n, d, 0)

#define container_of_or_null(ptr, type, member) \
({ \
	typeof(ptr) _ptr = ptr; \
	_ptr ? container_of(_ptr, type, member) : NULL; \
})

#define RB_INSERT(root, new, member, cmp) \
({ \
	__label__ dup; \
	struct rb_node **n = &(root)->rb_node, *parent = NULL; \
	typeof(new) this; \
	int res, ret = -1; \
	\
	while (*n) { \
		parent = *n; \
		this = container_of(*n, typeof(*(new)), member); \
		res = cmp(new, this); \
		if (!res) \
			goto dup; \
		n = res < 0 \
			? &(*n)->rb_left \
			: &(*n)->rb_right; \
	} \
	\
	rb_link_node(&(new)->member, parent, n); \
	rb_insert_color(&(new)->member, root); \
	ret = 0; \
dup: \
	ret; \
})

#define RB_SEARCH(root, search, member, cmp) \
({ \
	struct rb_node *n = (root)->rb_node; \
	typeof(&(search)) this, ret = NULL; \
	int res; \
	\
	while (n) { \
		this = container_of(n, typeof(search), member); \
		res = cmp(&(search), this); \
		if (!res) { \
			ret = this; \
			break; \
		} \
		n = res < 0 \
			? n->rb_left \
			: n->rb_right; \
	} \
	ret; \
})

#define RB_GREATER(root, search, member, cmp) \
({ \
	struct rb_node *n = (root)->rb_node; \
	typeof(&(search)) this, ret = NULL; \
	int res; \
	\
	while (n) { \
		this = container_of(n, typeof(search), member); \
		res = cmp(&(search), this); \
		if (res < 0) { \
			ret = this; \
			n = n->rb_left; \
		} else \
			n = n->rb_right; \
	} \
	ret; \
})

#define RB_FIRST(root, type, member) \
	container_of_or_null(rb_first(root), type, member)

#define RB_LAST(root, type, member) \
	container_of_or_null(rb_last(root), type, member)

#define RB_NEXT(ptr, member) \
	container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)

#define RB_PREV(ptr, member) \
	container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
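
/*
 * Sketch of how the rb tree wrappers compose (struct foo and foo_cmp()
 * are hypothetical; cmp returns <0, 0, >0 like memcmp):
 *
 *	struct foo { struct rb_node node; uint64_t key; };
 *
 *	static int foo_cmp(struct foo *l, struct foo *r)
 *	{
 *		return l->key < r->key ? -1 : l->key > r->key;
 *	}
 *
 *	if (RB_INSERT(&root, new, node, foo_cmp))	// -1 on duplicate key
 *		kfree(new);
 *
 *	struct foo key = { .key = 42 };
 *	struct foo *f = RB_SEARCH(&root, key, node, foo_cmp);
 */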

static inline uint64_t bch_crc64(const void *p, size_t len)
{
	uint64_t crc = 0xffffffffffffffffULL;

	crc = crc64_be(crc, p, len);
	return crc ^ 0xffffffffffffffffULL;
}

/*
 * A stepwise-linear pseudo-exponential.  This returns 1 << (x >>
 * fract_bits), with the less-significant bits filled in by linear
 * interpolation.
 *
 * This can also be interpreted as a floating-point number format,
 * where the low fract_bits are the mantissa (with implicit leading
 * 1 bit), and the more significant bits are the exponent.
 * The return value is 1.mantissa * 2^exponent.
 *
 * The way this is used, fract_bits is 6 and the largest possible
 * input is CONGESTED_MAX-1 = 1023 (exponent 15, mantissa 0x1.fc),
 * so the maximum output is 0xfe00.
 */
static inline unsigned int fract_exp_two(unsigned int x,
					 unsigned int fract_bits)
{
	unsigned int mantissa = 1 << fract_bits;	/* Implicit bit */

	mantissa += x & (mantissa - 1);
	x >>= fract_bits;		/* The exponent */
	/* Largest intermediate value 0x3f8000 */
	return mantissa << x >> fract_bits;
}
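
/*
 * Worked example: fract_exp_two(0x1ff, 6) computes mantissa = 64 + 63 =
 * 127 and exponent = 0x1ff >> 6 = 7, returning 127 << 7 >> 6 = 254, a
 * piecewise-linear approximation of 2^(511/64) ~= 253.
 */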

void bch_bio_map(struct bio *bio, void *base);
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);

#endif /* _BCACHE_UTIL_H */