#ifndef _BCACHE_UTIL_H
#define _BCACHE_UTIL_H

#include <linux/blkdev.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "closure.h"

#define PAGE_SECTORS (PAGE_SIZE / 512)

struct closure;

#ifdef CONFIG_BCACHE_DEBUG

#define EBUG_ON(cond) BUG_ON(cond)
#define atomic_dec_bug(v) BUG_ON(atomic_dec_return(v) < 0)
#define atomic_inc_bug(v, i) BUG_ON(atomic_inc_return(v) <= i)

#else /* DEBUG */

#define EBUG_ON(cond) do { if (cond); } while (0)
#define atomic_dec_bug(v) atomic_dec(v)
#define atomic_inc_bug(v, i) atomic_inc(v)

#endif

#define DECLARE_HEAP(type, name) \
	struct { \
		size_t size, used; \
		type *data; \
	} name

#define init_heap(heap, _size, gfp) \
({ \
	size_t _bytes; \
	(heap)->used = 0; \
	(heap)->size = (_size); \
	_bytes = (heap)->size * sizeof(*(heap)->data); \
	(heap)->data = NULL; \
	if (_bytes < KMALLOC_MAX_SIZE) \
		(heap)->data = kmalloc(_bytes, (gfp)); \
	if ((!(heap)->data) && ((gfp) & GFP_KERNEL)) \
		(heap)->data = vmalloc(_bytes); \
	(heap)->data; \
})

#define free_heap(heap) \
do { \
	if (is_vmalloc_addr((heap)->data)) \
		vfree((heap)->data); \
	else \
		kfree((heap)->data); \
	(heap)->data = NULL; \
} while (0)

#define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j])

#define heap_sift(h, i, cmp) \
do { \
	size_t _r, _j = i; \
	\
	for (; _j * 2 + 1 < (h)->used; _j = _r) { \
		_r = _j * 2 + 1; \
		if (_r + 1 < (h)->used && \
		    cmp((h)->data[_r], (h)->data[_r + 1])) \
			_r++; \
	\
		if (cmp((h)->data[_r], (h)->data[_j])) \
			break; \
		heap_swap(h, _r, _j); \
	} \
} while (0)

#define heap_sift_down(h, i, cmp) \
do { \
	while (i) { \
		size_t p = (i - 1) / 2; \
		if (cmp((h)->data[i], (h)->data[p])) \
			break; \
		heap_swap(h, i, p); \
		i = p; \
	} \
} while (0)

#define heap_add(h, d, cmp) \
({ \
	bool _r = !heap_full(h); \
	if (_r) { \
		size_t _i = (h)->used++; \
		(h)->data[_i] = d; \
	\
		heap_sift_down(h, _i, cmp); \
		heap_sift(h, _i, cmp); \
	} \
	_r; \
})

#define heap_pop(h, d, cmp) \
({ \
	bool _r = (h)->used; \
	if (_r) { \
		(d) = (h)->data[0]; \
		(h)->used--; \
		heap_swap(h, 0, (h)->used); \
		heap_sift(h, 0, cmp); \
	} \
	_r; \
})

#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)

#define heap_full(h) ((h)->used == (h)->size)
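
/*
 * Illustrative sketch: one way the heap macros above might be wired
 * together. All names below are hypothetical examples, not bcache
 * symbols, and the block is compiled out on purpose. With cmp(l, r)
 * defined as "l < r", the element the comparison ranks highest ends up
 * at data[0], i.e. this builds a max-heap.
 */
#if 0
static bool example_cmp(int l, int r)
{
	return l < r;	/* true means "l may sit below r" */
}

static void example_heap_usage(void)
{
	DECLARE_HEAP(int, h);
	int top;

	if (!init_heap(&h, 16, GFP_KERNEL))
		return;		/* allocation failed */

	heap_add(&h, 3, example_cmp);
	heap_add(&h, 1, example_cmp);

	if (heap_pop(&h, top, example_cmp))
		;		/* top == 3: the root holds the largest element */

	free_heap(&h);
}
#endif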

#define DECLARE_FIFO(type, name) \
	struct { \
		size_t front, back, size, mask; \
		type *data; \
	} name

#define fifo_for_each(c, fifo, iter) \
	for (iter = (fifo)->front; \
	     c = (fifo)->data[iter], iter != (fifo)->back; \
	     iter = (iter + 1) & (fifo)->mask)

#define __init_fifo(fifo, gfp) \
({ \
	size_t _allocated_size, _bytes; \
	BUG_ON(!(fifo)->size); \
	\
	_allocated_size = roundup_pow_of_two((fifo)->size + 1); \
	_bytes = _allocated_size * sizeof(*(fifo)->data); \
	\
	(fifo)->mask = _allocated_size - 1; \
	(fifo)->front = (fifo)->back = 0; \
	(fifo)->data = NULL; \
	\
	if (_bytes < KMALLOC_MAX_SIZE) \
		(fifo)->data = kmalloc(_bytes, (gfp)); \
	if ((!(fifo)->data) && ((gfp) & GFP_KERNEL)) \
		(fifo)->data = vmalloc(_bytes); \
	(fifo)->data; \
})

#define init_fifo_exact(fifo, _size, gfp) \
({ \
	(fifo)->size = (_size); \
	__init_fifo(fifo, gfp); \
})

#define init_fifo(fifo, _size, gfp) \
({ \
	(fifo)->size = (_size); \
	if ((fifo)->size > 4) \
		(fifo)->size = roundup_pow_of_two((fifo)->size) - 1; \
	__init_fifo(fifo, gfp); \
})

#define free_fifo(fifo) \
do { \
	if (is_vmalloc_addr((fifo)->data)) \
		vfree((fifo)->data); \
	else \
		kfree((fifo)->data); \
	(fifo)->data = NULL; \
} while (0)

#define fifo_used(fifo) (((fifo)->back - (fifo)->front) & (fifo)->mask)
#define fifo_free(fifo) ((fifo)->size - fifo_used(fifo))

#define fifo_empty(fifo) (!fifo_used(fifo))
#define fifo_full(fifo) (!fifo_free(fifo))

#define fifo_front(fifo) ((fifo)->data[(fifo)->front])
#define fifo_back(fifo) \
	((fifo)->data[((fifo)->back - 1) & (fifo)->mask])

#define fifo_idx(fifo, p) (((p) - &fifo_front(fifo)) & (fifo)->mask)

#define fifo_push_back(fifo, i) \
({ \
	bool _r = !fifo_full((fifo)); \
	if (_r) { \
		(fifo)->data[(fifo)->back++] = (i); \
		(fifo)->back &= (fifo)->mask; \
	} \
	_r; \
})

#define fifo_pop_front(fifo, i) \
({ \
	bool _r = !fifo_empty((fifo)); \
	if (_r) { \
		(i) = (fifo)->data[(fifo)->front++]; \
		(fifo)->front &= (fifo)->mask; \
	} \
	_r; \
})

#define fifo_push_front(fifo, i) \
({ \
	bool _r = !fifo_full((fifo)); \
	if (_r) { \
		--(fifo)->front; \
		(fifo)->front &= (fifo)->mask; \
		(fifo)->data[(fifo)->front] = (i); \
	} \
	_r; \
})
#define fifo_pop_back(fifo, i) \
({ \
	bool _r = !fifo_empty((fifo)); \
	if (_r) { \
		--(fifo)->back; \
		(fifo)->back &= (fifo)->mask; \
		(i) = (fifo)->data[(fifo)->back]; \
	} \
	_r; \
})

#define fifo_push(fifo, i) fifo_push_back(fifo, (i))
#define fifo_pop(fifo, i) fifo_pop_front(fifo, (i))

#define fifo_swap(l, r) \
do { \
	swap((l)->front, (r)->front); \
	swap((l)->back, (r)->back); \
	swap((l)->size, (r)->size); \
	swap((l)->mask, (r)->mask); \
	swap((l)->data, (r)->data); \
} while (0)

#define fifo_move(dest, src) \
do { \
	typeof(*((dest)->data)) _t; \
	while (!fifo_full(dest) && \
	       fifo_pop(src, _t)) \
		fifo_push(dest, _t); \
} while (0)
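
/*
 * Illustrative sketch: the typical life cycle of one of these circular
 * buffers. The names below are hypothetical examples, not bcache
 * symbols, and the block is compiled out on purpose.
 */
#if 0
static void example_fifo_usage(void)
{
	DECLARE_FIFO(unsigned, f);
	unsigned v;

	if (!init_fifo(&f, 8, GFP_KERNEL))
		return;			/* allocation failed */

	fifo_push(&f, 1);		/* enqueue at the back */
	fifo_push(&f, 2);

	while (fifo_pop(&f, v))		/* dequeue from the front: 1, then 2 */
		;

	free_fifo(&f);
}
#endif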

/*
 * Simple array based allocator - preallocates a number of elements and you can
 * never allocate more than that, also has no locking.
 *
 * Handy because if you know you only need a fixed number of elements you don't
 * have to worry about memory allocation failure, and sometimes a mempool isn't
 * what you want.
 *
 * We treat the free elements as entries in a singly linked list, and the
 * freelist as a stack - allocating and freeing push and pop off the freelist.
 */

#define DECLARE_ARRAY_ALLOCATOR(type, name, size) \
	struct { \
		type *freelist; \
		type data[size]; \
	} name

#define array_alloc(array) \
({ \
	typeof((array)->freelist) _ret = (array)->freelist; \
	\
	if (_ret) \
		(array)->freelist = *((typeof((array)->freelist) *) _ret); \
	\
	_ret; \
})

#define array_free(array, ptr) \
do { \
	typeof((array)->freelist) _ptr = ptr; \
	\
	*((typeof((array)->freelist) *) _ptr) = (array)->freelist; \
	(array)->freelist = _ptr; \
} while (0)

#define array_allocator_init(array) \
do { \
	typeof((array)->freelist) _i; \
	\
	BUILD_BUG_ON(sizeof((array)->data[0]) < sizeof(void *)); \
	(array)->freelist = NULL; \
	\
	for (_i = (array)->data; \
	     _i < (array)->data + ARRAY_SIZE((array)->data); \
	     _i++) \
		array_free(array, _i); \
} while (0)

#define array_freelist_empty(array) ((array)->freelist == NULL)
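
/*
 * Illustrative sketch: the allocator hands back pointers into the
 * embedded data[] array, and a freed element itself stores the next
 * freelist pointer (which is why elements must be at least pointer
 * sized). The names below are hypothetical examples, not bcache
 * symbols, and the block is compiled out on purpose.
 */
#if 0
struct example_obj {
	u64 key;
	u64 val;
};

static void example_array_alloc_usage(void)
{
	DECLARE_ARRAY_ALLOCATOR(struct example_obj, pool, 32);
	struct example_obj *o;

	array_allocator_init(&pool);	/* pushes all 32 slots onto the freelist */

	o = array_alloc(&pool);		/* NULL once all 32 slots are in use */
	if (o)
		array_free(&pool, o);	/* returns the slot to the freelist */
}
#endif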

#define ANYSINT_MAX(t) \
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

int bch_strtoint_h(const char *, int *);
int bch_strtouint_h(const char *, unsigned int *);
int bch_strtoll_h(const char *, long long *);
int bch_strtoull_h(const char *, unsigned long long *);

static inline int bch_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtoint_h(cp, (int *) res);
#else
	return bch_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch_strtouint_h(cp, (unsigned int *) res);
#else
	return bch_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res) \
	(__builtin_types_compatible_p(typeof(*res), int) \
	? bch_strtoint_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), long) \
	? bch_strtol_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), long long) \
	? bch_strtoll_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), unsigned int) \
	? bch_strtouint_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), unsigned long) \
	? bch_strtoul_h(cp, (void *) res) \
	: __builtin_types_compatible_p(typeof(*res), unsigned long long) \
	? bch_strtoull_h(cp, (void *) res) : -EINVAL)
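
/*
 * Illustrative sketch: strtoi_h() picks the parser from the pointee
 * type of its second argument, so the same call works for differently
 * typed sysfs knobs. The variables below are hypothetical examples and
 * the block is compiled out on purpose.
 */
#if 0
static int example_strtoi_h_usage(const char *buf)
{
	int congested_us;
	uint64_t nbuckets;

	if (strtoi_h(buf, &congested_us))	/* accepts human-readable suffixes, e.g. "k" */
		return -EINVAL;

	return strtoi_h(buf, &nbuckets);	/* here dispatches to bch_strtoull_h() */
}
#endif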

#define strtoul_safe(cp, var) \
({ \
	unsigned long _v; \
	int _r = kstrtoul(cp, 10, &_v); \
	if (!_r) \
		var = _v; \
	_r; \
})

#define strtoul_safe_clamp(cp, var, min, max) \
({ \
	unsigned long _v; \
	int _r = kstrtoul(cp, 10, &_v); \
	if (!_r) \
		var = clamp_t(typeof(var), _v, min, max); \
	_r; \
})

#define snprint(buf, size, var) \
	snprintf(buf, size, \
		 __builtin_types_compatible_p(typeof(var), int) \
		 ? "%i\n" : \
		 __builtin_types_compatible_p(typeof(var), unsigned) \
		 ? "%u\n" : \
		 __builtin_types_compatible_p(typeof(var), long) \
		 ? "%li\n" : \
		 __builtin_types_compatible_p(typeof(var), unsigned long) \
		 ? "%lu\n" : \
		 __builtin_types_compatible_p(typeof(var), int64_t) \
		 ? "%lli\n" : \
		 __builtin_types_compatible_p(typeof(var), uint64_t) \
		 ? "%llu\n" : \
		 __builtin_types_compatible_p(typeof(var), const char *) \
		 ? "%s\n" : "%i\n", var)

ssize_t bch_hprint(char *buf, int64_t v);

bool bch_is_zero(const char *p, size_t n);
int bch_parse_uuid(const char *s, char *uuid);

ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
				size_t selected);

ssize_t bch_read_string_list(const char *buf, const char * const list[]);

struct time_stats {
	spinlock_t lock;
	/*
	 * all fields are in nanoseconds, averages are ewmas stored left shifted
	 * by 8
	 */
	uint64_t max_duration;
	uint64_t average_duration;
	uint64_t average_frequency;
	uint64_t last;
};

void bch_time_stats_update(struct time_stats *stats, uint64_t time);

static inline unsigned local_clock_us(void)
{
	return local_clock() >> 10;
}

#define NSEC_PER_ns 1L
#define NSEC_PER_us NSEC_PER_USEC
#define NSEC_PER_ms NSEC_PER_MSEC
#define NSEC_PER_sec NSEC_PER_SEC

#define __print_time_stat(stats, name, stat, units) \
	sysfs_print(name ## _ ## stat ## _ ## units, \
		    div_u64((stats)->stat >> 8, NSEC_PER_ ## units))

#define sysfs_print_time_stats(stats, name, \
			       frequency_units, \
			       duration_units) \
do { \
	__print_time_stat(stats, name, \
			  average_frequency, frequency_units); \
	__print_time_stat(stats, name, \
			  average_duration, duration_units); \
	__print_time_stat(stats, name, \
			  max_duration, duration_units); \
	\
	sysfs_print(name ## _last_ ## frequency_units, (stats)->last \
		    ? div_s64(local_clock() - (stats)->last, \
			      NSEC_PER_ ## frequency_units) \
		    : -1LL); \
} while (0)

#define sysfs_time_stats_attribute(name, \
				   frequency_units, \
				   duration_units) \
read_attribute(name ## _average_frequency_ ## frequency_units); \
read_attribute(name ## _average_duration_ ## duration_units); \
read_attribute(name ## _max_duration_ ## duration_units); \
read_attribute(name ## _last_ ## frequency_units)

#define sysfs_time_stats_attribute_list(name, \
					frequency_units, \
					duration_units) \
&sysfs_ ## name ## _average_frequency_ ## frequency_units, \
&sysfs_ ## name ## _average_duration_ ## duration_units, \
&sysfs_ ## name ## _max_duration_ ## duration_units, \
&sysfs_ ## name ## _last_ ## frequency_units,

#define ewma_add(ewma, val, weight, factor) \
({ \
	(ewma) *= (weight) - 1; \
	(ewma) += (val) << factor; \
	(ewma) /= (weight); \
	(ewma) >> factor; \
})
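
/*
 * Worked example (illustrative): the stored ewma keeps "factor"
 * fractional bits and the macro returns the unshifted average. With
 * weight = 8, factor = 3, a stored ewma of 800 (i.e. 100.0) and a new
 * sample val = 200:
 *
 *	ewma = (800 * 7 + (200 << 3)) / 8 = (5600 + 1600) / 8 = 900
 *
 * and the macro yields 900 >> 3 = 112, i.e. the average moved roughly
 * 1/8th of the way from 100 towards 200.
 */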

struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	uint64_t next;

	/*
	 * Rate at which we want to do work, in units per nanosecond
	 * The units here correspond to the units passed to bch_next_delay()
	 */
	unsigned rate;
};

static inline void bch_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done);

#define __DIV_SAFE(n, d, zero) \
({ \
	typeof(n) _n = (n); \
	typeof(d) _d = (d); \
	_d ? _n / _d : zero; \
})

#define DIV_SAFE(n, d) __DIV_SAFE(n, d, 0)

#define container_of_or_null(ptr, type, member) \
({ \
	typeof(ptr) _ptr = ptr; \
	_ptr ? container_of(_ptr, type, member) : NULL; \
})

#define RB_INSERT(root, new, member, cmp) \
({ \
	__label__ dup; \
	struct rb_node **n = &(root)->rb_node, *parent = NULL; \
	typeof(new) this; \
	int res, ret = -1; \
	\
	while (*n) { \
		parent = *n; \
		this = container_of(*n, typeof(*(new)), member); \
		res = cmp(new, this); \
		if (!res) \
			goto dup; \
		n = res < 0 \
			? &(*n)->rb_left \
			: &(*n)->rb_right; \
	} \
	\
	rb_link_node(&(new)->member, parent, n); \
	rb_insert_color(&(new)->member, root); \
	ret = 0; \
dup: \
	ret; \
})

#define RB_SEARCH(root, search, member, cmp) \
({ \
	struct rb_node *n = (root)->rb_node; \
	typeof(&(search)) this, ret = NULL; \
	int res; \
	\
	while (n) { \
		this = container_of(n, typeof(search), member); \
		res = cmp(&(search), this); \
		if (!res) { \
			ret = this; \
			break; \
		} \
		n = res < 0 \
			? n->rb_left \
			: n->rb_right; \
	} \
	ret; \
})

#define RB_GREATER(root, search, member, cmp) \
({ \
	struct rb_node *n = (root)->rb_node; \
	typeof(&(search)) this, ret = NULL; \
	int res; \
	\
	while (n) { \
		this = container_of(n, typeof(search), member); \
		res = cmp(&(search), this); \
		if (res < 0) { \
			ret = this; \
			n = n->rb_left; \
		} else \
			n = n->rb_right; \
	} \
	ret; \
})

#define RB_FIRST(root, type, member) \
	container_of_or_null(rb_first(root), type, member)

#define RB_LAST(root, type, member) \
	container_of_or_null(rb_last(root), type, member)

#define RB_NEXT(ptr, member) \
	container_of_or_null(rb_next(&(ptr)->member), typeof(*ptr), member)

#define RB_PREV(ptr, member) \
	container_of_or_null(rb_prev(&(ptr)->member), typeof(*ptr), member)
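
/*
 * Illustrative sketch: the RB_* wrappers take a comparison macro or
 * function over two node pointers that returns negative, zero or
 * positive. The struct and helpers below are hypothetical examples,
 * not bcache symbols, and the block is compiled out on purpose.
 */
#if 0
struct example_node {
	struct rb_node	node;
	u64		key;
};

#define example_cmp(l, r) \
	((l)->key < (r)->key ? -1 : (l)->key > (r)->key)

static void example_rb_usage(struct rb_root *root, struct example_node *obj)
{
	struct example_node *it;

	if (RB_INSERT(root, obj, node, example_cmp))
		return;				/* duplicate key, not inserted */

	it = RB_FIRST(root, struct example_node, node);	/* smallest key */
	while (it)
		it = RB_NEXT(it, node);		/* in-order walk */
}
#endif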

/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x = 1 << x;
	x += (x * fract) >> fract_bits;

	return x;
}
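
/*
 * Worked example (illustrative): with fract_bits = 3, x = 21 encodes
 * 2 + 5/8, so
 *
 *	fract = 5, x = 1 << 2 = 4
 *	x += (4 * 5) >> 3 = 2,	giving 6
 *
 * i.e. 6 approximates 2^2.625 (~6.17) by interpolating linearly
 * between 2^2 = 4 and 2^3 = 8.
 */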

void bch_bio_map(struct bio *bio, void *base);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl, dev) \
do { \
	closure_get(cl); \
	bch_generic_make_request(bio, &(dev)->bio_split_hook); \
} while (0)

uint64_t bch_crc64_update(uint64_t, const void *, size_t);
uint64_t bch_crc64(const void *, size_t);

#endif /* _BCACHE_UTIL_H */