lib/math/div64.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
  4 *
  5 * Based on former do_div() implementation from asm-parisc/div64.h:
  6 *	Copyright (C) 1999 Hewlett-Packard Co
  7 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
  8 *
  9 *
 10 * Generic C version of 64bit/32bit division and modulo, with
 11 * 64bit result and 32bit remainder.
 12 *
 13 * The fast case for (n>>32 == 0) is handled inline by do_div().
 14 *
 15 * Code generated for this function might be very inefficient
 16 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 17 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 18 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 19 */
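
As the header comment notes, do_div() handles the common case inline and falls back to __div64_32() only when the upper 32 bits of the dividend are non-zero. A minimal usage sketch (ns_to_ms_example() is a hypothetical helper, not part of this file):

#include <linux/types.h>
#include <asm/div64.h>		/* do_div() */

static u64 ns_to_ms_example(u64 ns)
{
	/*
	 * do_div() divides the 64-bit lvalue in place and evaluates to the
	 * 32-bit remainder; after this statement 'ns' holds the quotient.
	 */
	do_div(ns, 1000000);
	return ns;
}

Note that the divisor must fit in 32 bits; for a full 64-bit divisor the div64_u64() family later in this file is the right tool.
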
 20
 21#include <linux/export.h>
 22#include <linux/kernel.h>
 23#include <linux/math64.h>
 24
 25/* Not needed on 64bit architectures */
 26#if BITS_PER_LONG == 32
 27
 28#ifndef __div64_32
 29uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 30{
 31	uint64_t rem = *n;
 32	uint64_t b = base;
 33	uint64_t res, d = 1;
 34	uint32_t high = rem >> 32;
 35
 36	/* Reduce the thing a bit first */
 37	res = 0;
 38	if (high >= base) {
 39		high /= base;
 40		res = (uint64_t) high << 32;
 41		rem -= (uint64_t) (high*base) << 32;
 42	}
 43
 44	while ((int64_t)b > 0 && b < rem) {
 45		b = b+b;
 46		d = d+d;
 47	}
 48
 49	do {
 50		if (rem >= b) {
 51			rem -= b;
 52			res += d;
 53		}
 54		b >>= 1;
 55		d >>= 1;
 56	} while (d);
 57
 58	*n = res;
 59	return rem;
 60}
 61EXPORT_SYMBOL(__div64_32);
 62#endif
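
The body of __div64_32() above is plain binary long division: the first loop doubles the divisor b and the quotient bit d until b is at least as large as the remaining dividend (the (int64_t)b > 0 test keeps the doubling from overflowing past bit 63), and the second loop shifts them back down, subtracting wherever b still fits. A toy trace, with values chosen purely for illustration: for n = 10 and base = 3, b/d double to 6/2 and then 12/4; the subtract loop skips 12, subtracts 6 (rem = 4, res = 2), then subtracts 3 (rem = 1, res = 3), and stops when d reaches 0, leaving quotient 3 and remainder 1.
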
 63
 64/**
 65 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 66 * @dividend:	64bit dividend
 67 * @divisor:	32bit divisor
 68 * @remainder:  32bit remainder
 69 */
 70#ifndef div_s64_rem
 71s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 72{
 73	u64 quotient;
 74
 75	if (dividend < 0) {
 76		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
 77		*remainder = -*remainder;
 78		if (divisor > 0)
 79			quotient = -quotient;
 80	} else {
 81		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
 82		if (divisor < 0)
 83			quotient = -quotient;
 84	}
 85	return quotient;
 86}
 87EXPORT_SYMBOL(div_s64_rem);
 88#endif
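
A hedged usage sketch for div_s64_rem(); split_ns_example() and its constant are illustrative only, not taken from this file:

#include <linux/math64.h>

/* Split a signed nanosecond count into whole seconds and leftover ns. */
static void split_ns_example(s64 ns, s64 *sec, s32 *rem_ns)
{
	/* The quotient truncates toward zero; *rem_ns keeps the sign of 'ns'. */
	*sec = div_s64_rem(ns, 1000000000, rem_ns);
}
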
 89
 90/**
 91 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 92 * @dividend:	64bit dividend
 93 * @divisor:	64bit divisor
 94 * @remainder:  64bit remainder
 95 *
 96 * This implementation is comparable to the algorithm used by div64_u64,
 97 * but this operation, which includes the math for calculating the remainder,
 98 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 99 * systems.
100 */
101#ifndef div64_u64_rem
102u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
103{
104	u32 high = divisor >> 32;
105	u64 quot;
106
107	if (high == 0) {
108		u32 rem32;
109		quot = div_u64_rem(dividend, divisor, &rem32);
110		*remainder = rem32;
111	} else {
112		int n = fls(high);
113		quot = div_u64(dividend >> n, divisor >> n);
114
115		if (quot != 0)
116			quot--;
117
118		*remainder = dividend - quot * divisor;
119		if (*remainder >= divisor) {
120			quot++;
121			*remainder -= divisor;
122		}
123	}
124
125	return quot;
126}
127EXPORT_SYMBOL(div64_u64_rem);
128#endif
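
A short usage sketch for div64_u64_rem() with a fully 64-bit divisor (periods_example() is hypothetical):

#include <linux/math64.h>

/* How many whole periods fit into 'elapsed_ns', and how far into the next? */
static u64 periods_example(u64 elapsed_ns, u64 period_ns, u64 *offset_ns)
{
	/* The quotient is returned; the remainder comes back via *offset_ns. */
	return div64_u64_rem(elapsed_ns, period_ns, offset_ns);
}
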
129
130/**
131 * div64_u64 - unsigned 64bit divide with 64bit divisor
132 * @dividend:	64bit dividend
133 * @divisor:	64bit divisor
134 *
135 * This implementation is a modified version of the algorithm proposed
136 * by the book 'Hacker's Delight'.  The original source and full proof
137 * can be found at the URL below and are available for use without restriction.
138 *
139 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
140 */
141#ifndef div64_u64
142u64 div64_u64(u64 dividend, u64 divisor)
143{
144	u32 high = divisor >> 32;
145	u64 quot;
146
147	if (high == 0) {
148		quot = div_u64(dividend, divisor);
149	} else {
150		int n = fls(high);
151		quot = div_u64(dividend >> n, divisor >> n);
152
153		if (quot != 0)
154			quot--;
155		if ((dividend - quot * divisor) >= divisor)
156			quot++;
157	}
158
159	return quot;
160}
161EXPORT_SYMBOL(div64_u64);
162#endif
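
A brief note on the correction steps in div64_u64(): once both operands are shifted right by n = fls(high), the divisor fits in 32 bits, and the estimate div_u64(dividend >> n, divisor >> n) is never below the true quotient and at most one above it, which is the property the Hacker's Delight construction guarantees. The quot-- therefore leaves a value that is exact or one too small, and the single check on (dividend - quot * divisor) >= divisor adds the missing one back. The same reasoning applies to div64_u64_rem() above.
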
163
164/**
165 * div64_s64 - signed 64bit divide with 64bit divisor
166 * @dividend:	64bit dividend
167 * @divisor:	64bit divisor
168 */
169#ifndef div64_s64
170s64 div64_s64(s64 dividend, s64 divisor)
171{
172	s64 quot, t;
173
174	quot = div64_u64(abs(dividend), abs(divisor));
175	t = (dividend ^ divisor) >> 63;
176
177	return (quot ^ t) - t;
178}
179EXPORT_SYMBOL(div64_s64);
180#endif
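
The sign handling in div64_s64() is branch free: the arithmetic right shift makes t either 0 (operand signs agree) or -1 with all bits set (signs differ), and (quot ^ t) - t leaves quot untouched for t == 0 but performs a two's-complement negation for t == -1. As a quick check with quot = 3 and t = -1: 3 ^ -1 = -4, and -4 - (-1) = -3.
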
181
182#endif /* BITS_PER_LONG == 32 */
183
184/*
185 * Iterative div/mod for use when dividend is not expected to be much
186 * bigger than divisor.
187 */
188u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
189{
190	return __iter_div_u64_rem(dividend, divisor, remainder);
191}
192EXPORT_SYMBOL(iter_div_u64_rem);
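
A hedged usage sketch for iter_div_u64_rem(); normalize_ns_example() is illustrative only. Because the underlying __iter_div_u64_rem() subtracts the divisor once per quotient step, it only pays off when the quotient is known to be small:

#include <linux/math64.h>

/* Fold whole seconds out of a nanosecond field that drifted slightly past 1s. */
static void normalize_ns_example(u64 *sec, u64 *nsec)
{
	u64 rem;

	/* Cheap only because *nsec is expected to exceed one second by little. */
	*sec += iter_div_u64_rem(*nsec, 1000000000, &rem);
	*nsec = rem;
}
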
lib/math/div64.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
  4 *
  5 * Based on former do_div() implementation from asm-parisc/div64.h:
  6 *	Copyright (C) 1999 Hewlett-Packard Co
  7 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
  8 *
  9 *
 10 * Generic C version of 64bit/32bit division and modulo, with
 11 * 64bit result and 32bit remainder.
 12 *
 13 * The fast case for (n>>32 == 0) is handled inline by do_div().
 14 *
 15 * Code generated for this function might be very inefficient
 16 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 17 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 18 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 19 */
 20
 21#include <linux/bitops.h>
 22#include <linux/export.h>
 23#include <linux/math.h>
 24#include <linux/math64.h>
 25#include <linux/log2.h>
 26
 27/* Not needed on 64bit architectures */
 28#if BITS_PER_LONG == 32
 29
 30#ifndef __div64_32
 31uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 32{
 33	uint64_t rem = *n;
 34	uint64_t b = base;
 35	uint64_t res, d = 1;
 36	uint32_t high = rem >> 32;
 37
 38	/* Reduce the thing a bit first */
 39	res = 0;
 40	if (high >= base) {
 41		high /= base;
 42		res = (uint64_t) high << 32;
 43		rem -= (uint64_t) (high*base) << 32;
 44	}
 45
 46	while ((int64_t)b > 0 && b < rem) {
 47		b = b+b;
 48		d = d+d;
 49	}
 50
 51	do {
 52		if (rem >= b) {
 53			rem -= b;
 54			res += d;
 55		}
 56		b >>= 1;
 57		d >>= 1;
 58	} while (d);
 59
 60	*n = res;
 61	return rem;
 62}
 63EXPORT_SYMBOL(__div64_32);
 64#endif
 65
 66#ifndef div_s64_rem
 67s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 68{
 69	u64 quotient;
 70
 71	if (dividend < 0) {
 72		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
 73		*remainder = -*remainder;
 74		if (divisor > 0)
 75			quotient = -quotient;
 76	} else {
 77		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
 78		if (divisor < 0)
 79			quotient = -quotient;
 80	}
 81	return quotient;
 82}
 83EXPORT_SYMBOL(div_s64_rem);
 84#endif
 85
 86/*
 87 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 88 * @dividend:	64bit dividend
 89 * @divisor:	64bit divisor
 90 * @remainder:  64bit remainder
 91 *
 92 * This implementation is comparable to the algorithm used by div64_u64,
 93 * but this operation, which includes the math for calculating the remainder,
 94 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
 95 * systems.
 96 */
 97#ifndef div64_u64_rem
 98u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
 99{
100	u32 high = divisor >> 32;
101	u64 quot;
102
103	if (high == 0) {
104		u32 rem32;
105		quot = div_u64_rem(dividend, divisor, &rem32);
106		*remainder = rem32;
107	} else {
108		int n = fls(high);
109		quot = div_u64(dividend >> n, divisor >> n);
110
111		if (quot != 0)
112			quot--;
113
114		*remainder = dividend - quot * divisor;
115		if (*remainder >= divisor) {
116			quot++;
117			*remainder -= divisor;
118		}
119	}
120
121	return quot;
122}
123EXPORT_SYMBOL(div64_u64_rem);
124#endif
125
126/*
127 * div64_u64 - unsigned 64bit divide with 64bit divisor
128 * @dividend:	64bit dividend
129 * @divisor:	64bit divisor
130 *
131 * This implementation is a modified version of the algorithm proposed
132 * by the book 'Hacker's Delight'.  The original source and full proof
133 * can be found at the URL below and are available for use without restriction.
134 *
135 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
136 */
137#ifndef div64_u64
138u64 div64_u64(u64 dividend, u64 divisor)
139{
140	u32 high = divisor >> 32;
141	u64 quot;
142
143	if (high == 0) {
144		quot = div_u64(dividend, divisor);
145	} else {
146		int n = fls(high);
147		quot = div_u64(dividend >> n, divisor >> n);
148
149		if (quot != 0)
150			quot--;
151		if ((dividend - quot * divisor) >= divisor)
152			quot++;
153	}
154
155	return quot;
156}
157EXPORT_SYMBOL(div64_u64);
158#endif
159
160#ifndef div64_s64
161s64 div64_s64(s64 dividend, s64 divisor)
162{
163	s64 quot, t;
164
165	quot = div64_u64(abs(dividend), abs(divisor));
166	t = (dividend ^ divisor) >> 63;
167
168	return (quot ^ t) - t;
169}
170EXPORT_SYMBOL(div64_s64);
171#endif
172
173#endif /* BITS_PER_LONG == 32 */
174
175/*
176 * Iterative div/mod for use when dividend is not expected to be much
177 * bigger than divisor.
178 */
179u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
180{
181	return __iter_div_u64_rem(dividend, divisor, remainder);
182}
183EXPORT_SYMBOL(iter_div_u64_rem);
184
185#ifndef mul_u64_u64_div_u64
186u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
187{
188	u64 res = 0, div, rem;
189	int shift;
190
191	/* can a * b overflow ? */
192	if (ilog2(a) + ilog2(b) > 62) {
193		/*
194		 * (b * a) / c is equal to
195		 *
196		 *      (b / c) * a +
197		 *      (b % c) * a / c
198		 *
199		 * if nothing overflows. Can the 1st multiplication
200		 * overflow? Yes, but we do not care: this can only
201		 * happen if the end result can't fit in u64 anyway.
202		 *
203		 * So the code below does
204		 *
205		 *      res = (b / c) * a;
206		 *      b = b % c;
207		 */
208		div = div64_u64_rem(b, c, &rem);
209		res = div * a;
210		b = rem;
211
212		shift = ilog2(a) + ilog2(b) - 62;
213		if (shift > 0) {
214			/* drop precision */
215			b >>= shift;
216			c >>= shift;
217			if (!c)
218				return res;
219		}
220	}
221
222	return res + div64_u64(a * b, c);
223}
224EXPORT_SYMBOL(mul_u64_u64_div_u64);
225#endif
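
mul_u64_u64_div_u64() exists for exactly the pattern where the product a * b may overflow 64 bits before the division brings the result back into range, as the inline comment above explains. A hedged usage sketch (rescale_example() and its parameters are illustrative):

#include <linux/math64.h>

/* Rescale a cycle count from one clock rate to another. */
static u64 rescale_example(u64 cycles, u64 to_hz, u64 from_hz)
{
	/*
	 * cycles * to_hz may not fit in 64 bits; mul_u64_u64_div_u64() splits
	 * the computation as described in the comment above, dropping a few
	 * low bits of precision only when it has no other choice.
	 */
	return mul_u64_u64_div_u64(cycles, to_hz, from_hz);
}
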