/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_UTIL_H
#define _BCACHEFS_UTIL_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/closure.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "mean_and_variance.h"

#include "darray.h"

struct closure;

#ifdef CONFIG_BCACHEFS_DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)
#endif

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CPU_BIG_ENDIAN		0
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define CPU_BIG_ENDIAN		1
#endif

/* type hackery */

#define type_is_exact(_val, _type)					\
	__builtin_types_compatible_p(typeof(_val), _type)

#define type_is(_val, _type)						\
	(__builtin_types_compatible_p(typeof(_val), _type) ||		\
	 __builtin_types_compatible_p(typeof(_val), const _type))

/* Userspace doesn't align allocations as nicely as the kernel allocators: */
static inline size_t buf_pages(void *p, size_t len)
{
	return DIV_ROUND_UP(len +
			    ((unsigned long) p & (PAGE_SIZE - 1)),
			    PAGE_SIZE);
}

static inline void vpfree(void *p, size_t size)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		free_pages((unsigned long) p, get_order(size));
}

static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
					 get_order(size)) ?:
		__vmalloc(size, gfp_mask);
}

static inline void kvpfree(void *p, size_t size)
{
	if (size < PAGE_SIZE)
		kfree(p);
	else
		vpfree(p, size);
}

static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
{
	return size < PAGE_SIZE
		? kmalloc(size, gfp_mask)
		: vpmalloc(size, gfp_mask);
}

int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);

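/*
 * Illustrative sketch (not part of the original header): kvpmalloc() uses
 * kmalloc() for sub-page sizes and whole pages/vmalloc otherwise, so the
 * matching free must go through kvpfree() with the same size:
 *
 *	size_t bytes = nr_entries * sizeof(u64);	// hypothetical size
 *	u64 *buf = kvpmalloc(bytes, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvpfree(buf, bytes);
 */
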
#define HEAP(type)							\
struct {								\
	size_t size, used;						\
	type *data;							\
}

#define DECLARE_HEAP(type, name) HEAP(type) name

#define init_heap(heap, _size, gfp)					\
({									\
	(heap)->used = 0;						\
	(heap)->size = (_size);						\
	(heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
				 (gfp));				\
})

#define free_heap(heap)							\
do {									\
	kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0]));	\
	(heap)->data = NULL;						\
} while (0)

#define heap_set_backpointer(h, i, _fn)					\
do {									\
	void (*fn)(typeof(h), size_t) = _fn;				\
	if (fn)								\
		fn(h, i);						\
} while (0)

#define heap_swap(h, i, j, set_backpointer)				\
do {									\
	swap((h)->data[i], (h)->data[j]);				\
	heap_set_backpointer(h, i, set_backpointer);			\
	heap_set_backpointer(h, j, set_backpointer);			\
} while (0)

#define heap_peek(h)							\
({									\
	EBUG_ON(!(h)->used);						\
	(h)->data[0];							\
})

#define heap_full(h)	((h)->used == (h)->size)

#define heap_sift_down(h, i, cmp, set_backpointer)			\
do {									\
	size_t _c, _j = i;						\
									\
	for (; _j * 2 + 1 < (h)->used; _j = _c) {			\
		_c = _j * 2 + 1;					\
		if (_c + 1 < (h)->used &&				\
		    cmp(h, (h)->data[_c], (h)->data[_c + 1]) >= 0)	\
			_c++;						\
									\
		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)		\
			break;						\
		heap_swap(h, _c, _j, set_backpointer);			\
	}								\
} while (0)

#define heap_sift_up(h, i, cmp, set_backpointer)			\
do {									\
	while (i) {							\
		size_t p = (i - 1) / 2;					\
		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)		\
			break;						\
		heap_swap(h, i, p, set_backpointer);			\
		i = p;							\
	}								\
} while (0)

#define __heap_add(h, d, cmp, set_backpointer)				\
({									\
	size_t _i = (h)->used++;					\
	(h)->data[_i] = d;						\
	heap_set_backpointer(h, _i, set_backpointer);			\
									\
	heap_sift_up(h, _i, cmp, set_backpointer);			\
	_i;								\
})

#define heap_add(h, d, cmp, set_backpointer)				\
({									\
	bool _r = !heap_full(h);					\
	if (_r)								\
		__heap_add(h, d, cmp, set_backpointer);			\
	_r;								\
})

#define heap_add_or_replace(h, new, cmp, set_backpointer)		\
do {									\
	if (!heap_add(h, new, cmp, set_backpointer) &&			\
	    cmp(h, new, heap_peek(h)) >= 0) {				\
		(h)->data[0] = new;					\
		heap_set_backpointer(h, 0, set_backpointer);		\
		heap_sift_down(h, 0, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_del(h, i, cmp, set_backpointer)				\
do {									\
	size_t _i = (i);						\
									\
	BUG_ON(_i >= (h)->used);					\
	(h)->used--;							\
	if ((_i) < (h)->used) {						\
		heap_swap(h, _i, (h)->used, set_backpointer);		\
		heap_sift_up(h, _i, cmp, set_backpointer);		\
		heap_sift_down(h, _i, cmp, set_backpointer);		\
	}								\
} while (0)

#define heap_pop(h, d, cmp, set_backpointer)				\
({									\
	bool _r = (h)->used;						\
	if (_r) {							\
		(d) = (h)->data[0];					\
		heap_del(h, 0, cmp, set_backpointer);			\
	}								\
	_r;								\
})

#define heap_resort(heap, cmp, set_backpointer)				\
do {									\
	ssize_t _i;							\
	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)	\
		heap_sift_down(heap, _i, cmp, set_backpointer);		\
} while (0)

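/*
 * Illustrative sketch (not part of the original header): a min-heap keyed by
 * a caller-supplied cmp(heap, l, r). All names below are hypothetical:
 *
 *	typedef HEAP(u64) u64_heap;
 *	#define u64_heap_cmp(h, l, r)	cmp_int(l, r)
 *
 *	u64_heap heap;
 *	u64 top;
 *
 *	if (!init_heap(&heap, 128, GFP_KERNEL))	// returns the allocation
 *		return -ENOMEM;
 *	heap_add(&heap, 42, u64_heap_cmp, NULL);
 *	heap_add(&heap, 7, u64_heap_cmp, NULL);
 *	if (heap_pop(&heap, top, u64_heap_cmp, NULL))
 *		pr_info("smallest: %llu\n", top);	// prints 7
 *	free_heap(&heap);
 */
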
#define ANYSINT_MAX(t)							\
	((((t) 1 << (sizeof(t) * 8 - 2)) - (t) 1) * (t) 2 + (t) 1)

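/*
 * Worked example (annotation, not in the original header): for an 8-bit
 * signed type, ANYSINT_MAX computes ((1 << 6) - 1) * 2 + 1 = 63 * 2 + 1 = 127,
 * i.e. the maximum value of any signed integer type, without ever shifting
 * into the sign bit.
 */
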
#include "printbuf.h"

#define prt_vprintf(_out, ...)		bch2_prt_vprintf(_out, __VA_ARGS__)
#define prt_printf(_out, ...)		bch2_prt_printf(_out, __VA_ARGS__)
#define printbuf_str(_buf)		bch2_printbuf_str(_buf)
#define printbuf_exit(_buf)		bch2_printbuf_exit(_buf)

#define printbuf_tabstops_reset(_buf)	bch2_printbuf_tabstops_reset(_buf)
#define printbuf_tabstop_pop(_buf)	bch2_printbuf_tabstop_pop(_buf)
#define printbuf_tabstop_push(_buf, _n)	bch2_printbuf_tabstop_push(_buf, _n)

#define printbuf_indent_add(_out, _n)	bch2_printbuf_indent_add(_out, _n)
#define printbuf_indent_sub(_out, _n)	bch2_printbuf_indent_sub(_out, _n)

#define prt_newline(_out)		bch2_prt_newline(_out)
#define prt_tab(_out)			bch2_prt_tab(_out)
#define prt_tab_rjust(_out)		bch2_prt_tab_rjust(_out)

#define prt_bytes_indented(...)		bch2_prt_bytes_indented(__VA_ARGS__)
#define prt_u64(_out, _v)		prt_printf(_out, "%llu", (u64) (_v))
#define prt_human_readable_u64(...)	bch2_prt_human_readable_u64(__VA_ARGS__)
#define prt_human_readable_s64(...)	bch2_prt_human_readable_s64(__VA_ARGS__)
#define prt_units_u64(...)		bch2_prt_units_u64(__VA_ARGS__)
#define prt_units_s64(...)		bch2_prt_units_s64(__VA_ARGS__)
#define prt_string_option(...)		bch2_prt_string_option(__VA_ARGS__)
#define prt_bitflags(...)		bch2_prt_bitflags(__VA_ARGS__)
#define prt_bitflags_vector(...)	bch2_prt_bitflags_vector(__VA_ARGS__)

void bch2_pr_time_units(struct printbuf *, u64);
void bch2_prt_datetime(struct printbuf *, time64_t);

#ifdef __KERNEL__
static inline void uuid_unparse_lower(u8 *uuid, char *out)
{
	sprintf(out, "%pUb", uuid);
}
#else
#include <uuid/uuid.h>
#endif

static inline void pr_uuid(struct printbuf *out, u8 *uuid)
{
	char uuid_str[40];

	uuid_unparse_lower(uuid, uuid_str);
	prt_printf(out, "%s", uuid_str);
}

int bch2_strtoint_h(const char *, int *);
int bch2_strtouint_h(const char *, unsigned int *);
int bch2_strtoll_h(const char *, long long *);
int bch2_strtoull_h(const char *, unsigned long long *);
int bch2_strtou64_h(const char *, u64 *);

static inline int bch2_strtol_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtoint_h(cp, (int *) res);
#else
	return bch2_strtoll_h(cp, (long long *) res);
#endif
}

static inline int bch2_strtoul_h(const char *cp, long *res)
{
#if BITS_PER_LONG == 32
	return bch2_strtouint_h(cp, (unsigned int *) res);
#else
	return bch2_strtoull_h(cp, (unsigned long long *) res);
#endif
}

#define strtoi_h(cp, res)						\
	( type_is(*res, int)		? bch2_strtoint_h(cp, (void *) res)\
	: type_is(*res, long)		? bch2_strtol_h(cp, (void *) res)\
	: type_is(*res, long long)	? bch2_strtoll_h(cp, (void *) res)\
	: type_is(*res, unsigned)	? bch2_strtouint_h(cp, (void *) res)\
	: type_is(*res, unsigned long)	? bch2_strtoul_h(cp, (void *) res)\
	: type_is(*res, unsigned long long) ? bch2_strtoull_h(cp, (void *) res)\
	: -EINVAL)

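/*
 * Illustrative sketch (not part of the original header): strtoi_h() picks the
 * matching bch2_strto*_h() parser from the pointee's type at compile time;
 * the "_h" parsers also accept human-readable suffixes such as "1M"
 * (an assumption based on the naming):
 *
 *	unsigned long long bytes;		// hypothetical variable
 *	int ret = strtoi_h("1M", &bytes);
 *	if (ret)
 *		return ret;
 */
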
#define strtoul_safe(cp, var)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = _v;						\
	_r;								\
})

#define strtoul_safe_clamp(cp, var, min, max)				\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r)							\
		var = clamp_t(typeof(var), _v, min, max);		\
	_r;								\
})

#define strtoul_safe_restrict(cp, var, min, max)			\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (!_r && _v >= min && _v <= max)				\
		var = _v;						\
	else								\
		_r = -EINVAL;						\
	_r;								\
})

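/*
 * Illustrative sketch (not part of the original header): _clamp forces
 * out-of-range input into [min, max], while _restrict rejects it:
 *
 *	unsigned timeout;				// hypothetical variable
 *	strtoul_safe_clamp("9999", timeout, 1, 100);	// timeout = 100, returns 0
 *	strtoul_safe_restrict("9999", timeout, 1, 100);	// unchanged, returns -EINVAL
 */
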
#define snprint(out, var)						\
	prt_printf(out,							\
		   type_is(var, int)		? "%i\n"		\
		 : type_is(var, unsigned)	? "%u\n"		\
		 : type_is(var, long)		? "%li\n"		\
		 : type_is(var, unsigned long)	? "%lu\n"		\
		 : type_is(var, s64)		? "%lli\n"		\
		 : type_is(var, u64)		? "%llu\n"		\
		 : type_is(var, char *)		? "%s\n"		\
		 : "%i\n", var)

bool bch2_is_zero(const void *, size_t);

u64 bch2_read_flag_list(char *, const char * const[]);

void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
void bch2_prt_u64_base2(struct printbuf *, u64);

void bch2_print_string_as_lines(const char *prefix, const char *lines);

typedef DARRAY(unsigned long) bch_stacktrace;
int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned, gfp_t);

static inline void prt_bdevname(struct printbuf *out, struct block_device *bdev)
{
#ifdef __KERNEL__
	prt_printf(out, "%pg", bdev);
#else
	prt_str(out, bdev->name);
#endif
}

#define NR_QUANTILES	15
#define QUANTILE_IDX(i)	inorder_to_eytzinger0(i, NR_QUANTILES)
#define QUANTILE_FIRST	eytzinger0_first(NR_QUANTILES)
#define QUANTILE_LAST	eytzinger0_last(NR_QUANTILES)

struct bch2_quantiles {
	struct bch2_quantile_entry {
		u64	m;
		u64	step;
	}		entries[NR_QUANTILES];
};

struct bch2_time_stat_buffer {
	unsigned	nr;
	struct bch2_time_stat_buffer_entry {
		u64	start;
		u64	end;
	}		entries[32];
};

struct bch2_time_stats {
	spinlock_t	lock;
	/* all fields are in nanoseconds */
	u64		min_duration;
	u64		max_duration;
	u64		total_duration;
	u64		max_freq;
	u64		min_freq;
	u64		last_event;
	struct bch2_quantiles quantiles;

	struct mean_and_variance	  duration_stats;
	struct mean_and_variance_weighted duration_stats_weighted;
	struct mean_and_variance	  freq_stats;
	struct mean_and_variance_weighted freq_stats_weighted;
	struct bch2_time_stat_buffer __percpu *buffer;
};

#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
void __bch2_time_stats_update(struct bch2_time_stats *stats, u64, u64);

static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start)
{
	__bch2_time_stats_update(stats, start, local_clock());
}

static inline bool track_event_change(struct bch2_time_stats *stats,
				      u64 *start, bool v)
{
	if (v != !!*start) {
		if (!v) {
			bch2_time_stats_update(stats, *start);
			*start = 0;
		} else {
			*start = local_clock() ?: 1;
			return true;
		}
	}

	return false;
}
#else
static inline void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end) {}
static inline void bch2_time_stats_update(struct bch2_time_stats *stats, u64 start) {}
static inline bool track_event_change(struct bch2_time_stats *stats,
				      u64 *start, bool v)
{
	bool ret = v && !*start;
	*start = v;
	return ret;
}
#endif

void bch2_time_stats_to_text(struct printbuf *, struct bch2_time_stats *);

void bch2_time_stats_exit(struct bch2_time_stats *);
void bch2_time_stats_init(struct bch2_time_stats *);

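/*
 * Illustrative sketch (not part of the original header): time an operation by
 * sampling local_clock() before it; bch2_time_stats_update() takes the end
 * sample itself:
 *
 *	struct bch2_time_stats stats;		// hypothetical instance
 *
 *	bch2_time_stats_init(&stats);
 *	u64 start = local_clock();
 *	do_work();				// hypothetical operation
 *	bch2_time_stats_update(&stats, start);
 *	...
 *	bch2_time_stats_exit(&stats);
 */
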
#define ewma_add(ewma, val, weight)					\
({									\
	typeof(ewma) _ewma = (ewma);					\
	typeof(weight) _weight = (weight);				\
									\
	(((_ewma << _weight) - _ewma) + (val)) >> _weight;		\
})

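/*
 * Worked example (annotation, not in the original header): ewma_add() moves
 * the average 1/2^weight of the way toward val. With ewma = 100, val = 200,
 * weight = 3: ((100 << 3) - 100 + 200) >> 3 = 900 >> 3 = 112, i.e.
 * 100 + (200 - 100) / 8, rounded down.
 */
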
struct bch_ratelimit {
	/* Next time we want to do some work, in nanoseconds */
	u64			next;

	/*
	 * Rate at which we want to do work, in units per nanosecond
	 * The units here correspond to the units passed to
	 * bch2_ratelimit_increment()
	 */
	unsigned		rate;
};

static inline void bch2_ratelimit_reset(struct bch_ratelimit *d)
{
	d->next = local_clock();
}

u64 bch2_ratelimit_delay(struct bch_ratelimit *);
void bch2_ratelimit_increment(struct bch_ratelimit *, u64);

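/*
 * Illustrative sketch (not part of the original header, and only a plausible
 * pattern): account work done with bch2_ratelimit_increment(), then sleep for
 * whatever delay bch2_ratelimit_delay() reports before doing more:
 *
 *	bch2_ratelimit_reset(&rl);			// hypothetical instance
 *	while (have_work()) {				// hypothetical condition
 *		u64 delay = bch2_ratelimit_delay(&rl);
 *		if (delay)
 *			schedule_timeout_interruptible(nsecs_to_jiffies(delay));
 *		bch2_ratelimit_increment(&rl, bytes_done);
 *	}
 */
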
struct bch_pd_controller {
	struct bch_ratelimit	rate;
	unsigned long		last_update;

	s64			last_actual;
	s64			smoothed_derivative;

	unsigned		p_term_inverse;
	unsigned		d_smooth;
	unsigned		d_term;

	/* for exporting to sysfs (no effect on behavior) */
	s64			last_derivative;
	s64			last_proportional;
	s64			last_change;
	s64			last_target;

	/*
	 * If true, the rate will not increase if bch2_ratelimit_delay()
	 * is not being called often enough.
	 */
	bool			backpressure;
};

void bch2_pd_controller_update(struct bch_pd_controller *, s64, s64, int);
void bch2_pd_controller_init(struct bch_pd_controller *);
void bch2_pd_controller_debug_to_text(struct printbuf *, struct bch_pd_controller *);

#define sysfs_pd_controller_attribute(name)				\
	rw_attribute(name##_rate);					\
	rw_attribute(name##_rate_bytes);				\
	rw_attribute(name##_rate_d_term);				\
	rw_attribute(name##_rate_p_term_inverse);			\
	read_attribute(name##_rate_debug)

#define sysfs_pd_controller_files(name)					\
	&sysfs_##name##_rate,						\
	&sysfs_##name##_rate_bytes,					\
	&sysfs_##name##_rate_d_term,					\
	&sysfs_##name##_rate_p_term_inverse,				\
	&sysfs_##name##_rate_debug

#define sysfs_pd_controller_show(name, var)				\
do {									\
	sysfs_hprint(name##_rate,		(var)->rate.rate);	\
	sysfs_print(name##_rate_bytes,		(var)->rate.rate);	\
	sysfs_print(name##_rate_d_term,		(var)->d_term);		\
	sysfs_print(name##_rate_p_term_inverse,	(var)->p_term_inverse);	\
									\
	if (attr == &sysfs_##name##_rate_debug)				\
		bch2_pd_controller_debug_to_text(out, var);		\
} while (0)

#define sysfs_pd_controller_store(name, var)				\
do {									\
	sysfs_strtoul_clamp(name##_rate,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul_clamp(name##_rate_bytes,				\
			    (var)->rate.rate, 1, UINT_MAX);		\
	sysfs_strtoul(name##_rate_d_term,	(var)->d_term);		\
	sysfs_strtoul_clamp(name##_rate_p_term_inverse,			\
			    (var)->p_term_inverse, 1, INT_MAX);		\
} while (0)

#define container_of_or_null(ptr, type, member)				\
({									\
	typeof(ptr) _ptr = ptr;						\
	_ptr ? container_of(_ptr, type, member) : NULL;			\
})

/* Does linear interpolation between powers of two */
static inline unsigned fract_exp_two(unsigned x, unsigned fract_bits)
{
	unsigned fract = x & ~(~0 << fract_bits);

	x >>= fract_bits;
	x   = 1 << x;
	x  += (x * fract) >> fract_bits;

	return x;
}

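/*
 * Worked example (annotation, not in the original header): with
 * fract_bits = 8, x = 0x180 encodes 1.5 in fixed point. fract = 0x80, the
 * integer part gives 1 << 1 = 2, and the interpolation adds
 * (2 * 128) >> 8 = 1, returning 3: a linear approximation of 2^1.5 ~= 2.83
 * between 2^1 and 2^2.
 */
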
void bch2_bio_map(struct bio *bio, void *base, size_t);
int bch2_bio_alloc_pages(struct bio *, size_t, gfp_t);

static inline sector_t bdev_sectors(struct block_device *bdev)
{
	return bdev->bd_inode->i_size >> 9;
}

#define closure_bio_submit(bio, cl)					\
do {									\
	closure_get(cl);						\
	submit_bio(bio);						\
} while (0)

#define kthread_wait(cond)						\
({									\
	int _ret = 0;							\
									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

#define kthread_wait_freezable(cond)					\
({									\
	int _ret = 0;							\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (kthread_should_stop()) {				\
			_ret = -1;					\
			break;						\
		}							\
									\
		if (cond)						\
			break;						\
									\
		schedule();						\
		try_to_freeze();					\
	}								\
	set_current_state(TASK_RUNNING);				\
	_ret;								\
})

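/*
 * Illustrative sketch (not part of the original header): a kthread main loop
 * that sleeps until there is work or the thread is asked to stop; the macro
 * returns 0 when cond became true and -1 on kthread_should_stop():
 *
 *	static int worker_fn(void *arg)			// hypothetical thread fn
 *	{
 *		while (!kthread_wait(have_work()))	// hypothetical condition
 *			do_work();			// hypothetical helper
 *		return 0;
 *	}
 */
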
size_t bch2_rand_range(size_t);

void memcpy_to_bio(struct bio *, struct bvec_iter, const void *);
void memcpy_from_bio(void *, struct bio *, struct bvec_iter);

static inline void memcpy_u64s_small(void *dst, const void *src,
				     unsigned u64s)
{
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
}

static inline void __memcpy_u64s(void *dst, const void *src,
				 unsigned u64s)
{
#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("rep ; movsq"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	u64 *d = dst;
	const u64 *s = src;

	while (u64s--)
		*d++ = *s++;
#endif
}

static inline void memcpy_u64s(void *dst, const void *src,
			       unsigned u64s)
{
	EBUG_ON(!(dst >= src + u64s * sizeof(u64) ||
		  dst + u64s * sizeof(u64) <= src));

	__memcpy_u64s(dst, src, u64s);
}

static inline void __memmove_u64s_down(void *dst, const void *src,
				       unsigned u64s)
{
	__memcpy_u64s(dst, src, u64s);
}

static inline void memmove_u64s_down(void *dst, const void *src,
				     unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down(dst, src, u64s);
}

static inline void __memmove_u64s_down_small(void *dst, const void *src,
					     unsigned u64s)
{
	memcpy_u64s_small(dst, src, u64s);
}

static inline void memmove_u64s_down_small(void *dst, const void *src,
					   unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down_small(dst, src, u64s);
}

static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
					   unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s;
	u64 *src = (u64 *) _src + u64s;

	while (u64s--)
		*--dst = *--src;
}

static inline void memmove_u64s_up_small(void *dst, const void *src,
					 unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up_small(dst, src, u64s);
}

static inline void __memmove_u64s_up(void *_dst, const void *_src,
				     unsigned u64s)
{
	u64 *dst = (u64 *) _dst + u64s - 1;
	u64 *src = (u64 *) _src + u64s - 1;

#ifdef CONFIG_X86_64
	long d0, d1, d2;

	asm volatile("std ;\n"
		     "rep ; movsq\n"
		     "cld ;\n"
		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
		     : "0" (u64s), "1" (dst), "2" (src)
		     : "memory");
#else
	while (u64s--)
		*dst-- = *src--;
#endif
}

static inline void memmove_u64s_up(void *dst, const void *src,
				   unsigned u64s)
{
	EBUG_ON(dst < src);

	__memmove_u64s_up(dst, src, u64s);
}

static inline void memmove_u64s(void *dst, const void *src,
				unsigned u64s)
{
	if (dst < src)
		__memmove_u64s_down(dst, src, u64s);
	else
		__memmove_u64s_up(dst, src, u64s);
}

/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
{
	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;

	memset(s + bytes, c, rem);
}

void sort_cmp_size(void *base, size_t num, size_t size,
		   int (*cmp_func)(const void *, const void *, size_t),
		   void (*swap_func)(void *, void *, size_t));

/* just the memmove, doesn't update @_nr */
#define __array_insert_item(_array, _nr, _pos)				\
	memmove(&(_array)[(_pos) + 1],					\
		&(_array)[(_pos)],					\
		sizeof((_array)[0]) * ((_nr) - (_pos)))

#define array_insert_item(_array, _nr, _pos, _new_item)			\
do {									\
	__array_insert_item(_array, _nr, _pos);				\
	(_nr)++;							\
	(_array)[(_pos)] = (_new_item);					\
} while (0)

#define array_remove_items(_array, _nr, _pos, _nr_to_remove)		\
do {									\
	(_nr) -= (_nr_to_remove);					\
	memmove(&(_array)[(_pos)],					\
		&(_array)[(_pos) + (_nr_to_remove)],			\
		sizeof((_array)[0]) * ((_nr) - (_pos)));		\
} while (0)

#define array_remove_item(_array, _nr, _pos)				\
	array_remove_items(_array, _nr, _pos, 1)

static inline void __move_gap(void *array, size_t element_size,
			      size_t nr, size_t size,
			      size_t old_gap, size_t new_gap)
{
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(array + element_size * (gap_end - move),
			array + element_size * (old_gap - move),
			element_size * move);
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(array + element_size * old_gap,
			array + element_size * gap_end,
			element_size * move);
	}
}

/* Move the gap in a gap buffer: */
#define move_gap(_array, _nr, _size, _old_gap, _new_gap)	\
	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)

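/*
 * Worked example (annotation, not in the original header): with size = 8,
 * nr = 5 holding "ABCDE" and old_gap = 2 (elements before the gap), the
 * layout is A B _ _ _ C D E, with the gap at indices 2..4. Moving to
 * new_gap = 4 memmoves "CD" down into indices 2..3, giving
 * A B C D _ _ _ E: element order is preserved, only the hole moves.
 */
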
#define bubble_sort(_base, _nr, _cmp)					\
do {									\
	ssize_t _i, _last;						\
	bool _swapped = true;						\
									\
	for (_last = (ssize_t) (_nr) - 1; _last > 0 && _swapped; --_last) {\
		_swapped = false;					\
		for (_i = 0; _i < _last; _i++)				\
			if (_cmp((_base)[_i], (_base)[_i + 1]) > 0) {	\
				swap((_base)[_i], (_base)[_i + 1]);	\
				_swapped = true;			\
			}						\
	}								\
} while (0)

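/*
 * Illustrative sketch (not part of the original header): _cmp takes elements
 * by value, so plain integer arrays sort with cmp_int() (defined below):
 *
 *	unsigned vals[] = { 3, 1, 2 };			// hypothetical array
 *	bubble_sort(vals, ARRAY_SIZE(vals), cmp_int);	// vals = { 1, 2, 3 }
 */
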
static inline u64 percpu_u64_get(u64 __percpu *src)
{
	u64 ret = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(src, cpu);
	return ret;
}

static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(dst, cpu) = 0;
	this_cpu_write(*dst, src);
}

static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
{
	unsigned i;

	for (i = 0; i < nr; i++)
		acc[i] += src[i];
}

static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
				   unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
}

static inline void percpu_memset(void __percpu *p, int c, size_t bytes)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memset(per_cpu_ptr(p, cpu), c, bytes);
}

u64 *bch2_acc_percpu_u64s(u64 __percpu *, unsigned);

#define cmp_int(l, r)		((l > r) - (l < r))

static inline int u8_cmp(u8 l, u8 r)
{
	return cmp_int(l, r);
}

static inline int cmp_le32(__le32 l, __le32 r)
{
	return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
}

#include <linux/uuid.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

static inline bool qstr_eq(const struct qstr l, const struct qstr r)
{
	return l.len == r.len && !memcmp(l.name, r.name, l.len);
}

void bch2_darray_str_exit(darray_str *);
int bch2_split_devs(const char *, darray_str *);

#endif /* _BCACHEFS_UTIL_H */