lib/math/div64.c (Linux v6.13.7)

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 */

#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/log2.h>
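
The header notes that the fast case, where the upper 32 bits of the dividend are already zero, is handled inline by the do_div() macro. As a quick sketch of the calling convention (values illustrative): do_div() divides its first argument in place and returns the 32-bit remainder.

	/* Sketch: splitting a nanosecond count into seconds plus leftover ns. */
	u64 ns = 3123456789ULL;
	u32 rem = do_div(ns, 1000000000U);	/* ns == 3, rem == 123456789 */
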
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32

#ifndef __div64_32
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the thing a bit first */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high*base) << 32;
	}

	/* scale the divisor up until it is just below the remaining dividend */
	while ((int64_t)b > 0 && b < rem) {
		b = b+b;
		d = d+d;
	}

	/* shift-and-subtract long division, one quotient bit per step */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;
}
EXPORT_SYMBOL(__div64_32);
#endif
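
The contract here is that *n is replaced by the quotient and the remainder is returned. A standalone userspace harness, illustrative only and assuming the function above is compiled into the same program (EXPORT_SYMBOL aside, it is plain C):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t n = 0x123456789ABCDEF0ULL;	/* dividend, updated in place */
		uint32_t r = __div64_32(&n, 1000000000U);

		/* quotient * base + remainder reconstructs the original value */
		assert(n * 1000000000ULL + r == 0x123456789ABCDEF0ULL);
		return 0;
	}
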
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
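
The sign handling follows C99 truncated division: the quotient rounds toward zero and the remainder takes the sign of the dividend. A couple of worked cases (illustrative):

	s32 r;
	div_s64_rem(-7, 2, &r);		/* returns -3, r == -1 (not -4 and +1) */
	div_s64_rem(7, -2, &r);		/* returns -3, r == +1 */
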
/*
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 * @remainder:  64bit remainder
 *
 * This implementation is comparable to the algorithm used by div64_u64.
 * But the operation, which includes the math for calculating the remainder,
 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 * systems.
 */
#ifndef div64_u64_rem
u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		u32 rem32;
		quot = div_u64_rem(dividend, divisor, &rem32);
		*remainder = rem32;
	} else {
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;

		*remainder = dividend - quot * divisor;
		if (*remainder >= divisor) {
			quot++;
			*remainder -= divisor;
		}
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64_rem);
#endif

/*
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend:	64bit dividend
 * @divisor:	64bit divisor
 *
 * This implementation is a modified version of the algorithm proposed
 * in the book 'Hacker's Delight'. The original source and full proof
 * are available at the URL below, free for use without restriction.
 *
 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
 */
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
{
	u32 high = divisor >> 32;
	u64 quot;

	if (high == 0) {
		quot = div_u64(dividend, divisor);
	} else {
		int n = fls(high);
		quot = div_u64(dividend >> n, divisor >> n);

		if (quot != 0)
			quot--;
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}

	return quot;
}
EXPORT_SYMBOL(div64_u64);
#endif
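
Both div64_u64_rem() and div64_u64() lean on the same Hacker's Delight idea: shift dividend and divisor right by n = fls(high) so the divisor fits in 32 bits, take the cheap 64/32 quotient as an estimate that can exceed the true quotient by at most one, pre-decrement it, then add one back if the resulting remainder is still at least the divisor. A hedged standalone sketch of that estimate-and-correct step (userspace, with a GCC builtin standing in for the kernel's fls()):

	#include <stdint.h>

	static uint64_t div64_sketch(uint64_t dividend, uint64_t divisor)
	{
		uint32_t high = divisor >> 32;
		uint64_t quot;
		int n;

		if (!high)
			return dividend / (uint32_t)divisor;	/* stand-in for div_u64() */

		n = 64 - __builtin_clzll(high);			/* same as fls(high) */
		quot = (dividend >> n) / (divisor >> n);	/* estimate: true or true+1 */
		if (quot)
			quot--;					/* now true or true-1 */
		if (dividend - quot * divisor >= divisor)
			quot++;					/* one correction suffices */
		return quot;
	}
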
#ifndef div64_s64
s64 div64_s64(s64 dividend, s64 divisor)
{
	s64 quot, t;

	quot = div64_u64(abs(dividend), abs(divisor));
	t = (dividend ^ divisor) >> 63;

	return (quot ^ t) - t;
}
EXPORT_SYMBOL(div64_s64);
#endif
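
The last two lines are a branchless conditional negation: the arithmetic shift makes t all-zeros when dividend and divisor have the same sign and all-ones (-1) when they differ, so (quot ^ t) - t is either quot unchanged or its two's complement ~quot + 1. A tiny illustrative check:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		int64_t quot = 3;
		int64_t t = -1;			/* signs differ: all-ones mask */

		assert(((quot ^ t) - t) == -3);	/* ~3 + 1 == -3 */
		t = 0;				/* signs agree: identity */
		assert(((quot ^ t) - t) == 3);
		return 0;
	}
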
#endif /* BITS_PER_LONG == 32 */

/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
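
The inline helper __iter_div_u64_rem() (defined in include/linux/math64.h) subtracts the divisor in a loop, which is cheaper than a full 64-by-32 division when the quotient is known to be tiny. An illustrative equivalent sketch:

	static inline uint32_t iter_div_sketch(uint64_t dividend, uint32_t divisor,
					       uint64_t *remainder)
	{
		uint32_t quot = 0;

		while (dividend >= divisor) {
			dividend -= divisor;	/* one iteration per quotient unit */
			quot++;
		}
		*remainder = dividend;
		return quot;
	}
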
#ifndef mul_u64_u64_div_u64
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
	if (ilog2(a) + ilog2(b) <= 62)
		return div64_u64(a * b, c);

#if defined(__SIZEOF_INT128__)

	/* native 64x64=128 bits multiplication */
	u128 prod = (u128)a * b;
	u64 n_lo = prod, n_hi = prod >> 64;

#else

	/* perform a 64x64=128 bits multiplication manually */
	u32 a_lo = a, a_hi = a >> 32, b_lo = b, b_hi = b >> 32;
	u64 x, y, z;

	x = (u64)a_lo * b_lo;			/* partial product, bits 0..63 */
	y = (u64)a_lo * b_hi + (u32)(x >> 32);	/* cross term plus carry, weight 2^32 */
	z = (u64)a_hi * b_hi + (u32)(y >> 32);	/* high product plus carry, weight 2^64 */
	y = (u64)a_hi * b_lo + (u32)y;		/* second cross term, weight 2^32 */
	z += (u32)(y >> 32);			/* final carry into the high word */
	x = (y << 32) + (u32)x;			/* reassemble the low 64 bits */

	u64 n_lo = x, n_hi = z;

#endif

	/* make sure c is not zero, trigger exception otherwise */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdiv-by-zero"
	if (unlikely(c == 0))
		return 1/0;
#pragma GCC diagnostic pop

	int shift = __builtin_ctzll(c);

	/* try reducing the fraction in case the dividend becomes <= 64 bits */
	if ((n_hi >> shift) == 0) {
		u64 n = shift ? (n_lo >> shift) | (n_hi << (64 - shift)) : n_lo;

		return div64_u64(n, c >> shift);
		/*
		 * The remainder value if needed would be:
		 *   res = div64_u64_rem(n, c >> shift, &rem);
		 *   rem = (rem << shift) + (n_lo - (n << shift));
		 */
	}

	if (n_hi >= c) {
		/* overflow: result is unrepresentable in a u64 */
		return -1;
	}

	/* Do the full 128 by 64 bits division */

	shift = __builtin_clzll(c);
	c <<= shift;				/* normalize divisor: top bit set */

	int p = 64 + shift;			/* tracks the weight of the next quotient bit */
	u64 res = 0;
	bool carry;

	do {
		carry = n_hi >> 63;
		shift = carry ? 1 : __builtin_clzll(n_hi);
		if (p < shift)
			break;
		p -= shift;
		n_hi <<= shift;			/* shift the remainder up against c */
		n_hi |= n_lo >> (64 - shift);
		n_lo <<= shift;
		if (carry || (n_hi >= c)) {
			n_hi -= c;		/* subtract and record a quotient bit */
			res |= 1ULL << p;
		}
	} while (n_hi);
	/* The remainder value if needed would be n_hi << p */

	return res;
}
EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif
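
mul_u64_u64_div_u64() exists for computing a * b / c when the intermediate product needs the full 128 bits. A hypothetical scaling example (names and values are illustrative, not from the source):

	/* average throughput in bytes/sec, without overflowing a * b */
	u64 bytes = 123456789012ULL;
	u64 elapsed_ns = 2500000000ULL;		/* 2.5 seconds */
	u64 rate = mul_u64_u64_div_u64(bytes, 1000000000ULL, elapsed_ns);
	/* rate == 49382715604, i.e. bytes / 2.5 truncated */
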
lib/math/div64.c (Linux v5.9)

The v5.9 version of this file is essentially identical apart from kernel-doc comments and a different set of includes; the one substantive difference is the earlier mul_u64_u64_div_u64() implementation, shown below, which was shorter but could drop precision:

#ifndef mul_u64_u64_div_u64
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
	u64 res = 0, div, rem;
	int shift;

	/* can a * b overflow ? */
	if (ilog2(a) + ilog2(b) > 62) {
		/*
		 * (b * a) / c is equal to
		 *
		 *      (b / c) * a +
		 *      (b % c) * a / c
		 *
		 * if nothing overflows. Can the 1st multiplication
		 * overflow? Yes, but we do not care: this can only
		 * happen if the end result can't fit in u64 anyway.
		 *
		 * So the code below does
		 *
		 *      res = (b / c) * a;
		 *      b = b % c;
		 */
		div = div64_u64_rem(b, c, &rem);
		res = div * a;
		b = rem;

		shift = ilog2(a) + ilog2(b) - 62;
		if (shift > 0) {
			/* drop precision */
			b >>= shift;
			c >>= shift;
			if (!c)
				return res;
		}
	}

	return res + div64_u64(a * b, c);
}
#endif
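
Note that this older version is not exact: when a * (b % c) would still overflow 64 bits, it shifts the low bits out of both b and c ("drop precision"), so the returned quotient can be slightly low. A worked illustration (values chosen for exposition, not from the source):

	/* a = 1ULL << 40, b = (1ULL << 40) + (1ULL << 17), c = 1ULL << 41
	 *
	 * Exact result (as computed by the v6.13.7 code above):
	 *   (1ULL << 39) + (1ULL << 16) == 549755879424
	 *
	 * Here shift == 18, so bit 17 of b is discarded and this version
	 * returns 1ULL << 39 == 549755813888, short by 65536. */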