Linux kernel header: arch/arm/include/asm/div64.h (ARM 64-by-32 division support).
Two versions of the file follow; the first copy below is from kernel v6.9.4.
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef __ASM_ARM_DIV64
  3#define __ASM_ARM_DIV64
  4
  5#include <linux/types.h>
  6#include <asm/compiler.h>
  7
  8/*
  9 * The semantics of __div64_32() are:
 10 *
 11 * uint32_t __div64_32(uint64_t *n, uint32_t base)
 12 * {
 13 * 	uint32_t remainder = *n % base;
 14 * 	*n = *n / base;
 15 * 	return remainder;
 16 * }
 17 *
 18 * In other words, a 64-bit dividend with a 32-bit divisor producing
 19 * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
 20 * we override the generic version in lib/div64.c to call our __do_div64
 21 * assembly implementation with completely non standard calling convention
 22 * for arguments and results (beware).
 23 */
 24static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
 25{
 26	register unsigned int __base      asm("r4") = base;
 27	register unsigned long long __n   asm("r0") = *n;
 28	register unsigned long long __res asm("r2");
 29	unsigned int __rem;
 30	asm(	__asmeq("%0", "r0")
 31		__asmeq("%1", "r2")
 32		__asmeq("%2", "r4")
 33		"bl	__do_div64"
 34		: "+r" (__n), "=r" (__res)
 35		: "r" (__base)
 36		: "ip", "lr", "cc");
 37	__rem = __n >> 32;
 38	*n = __res;
 39	return __rem;
 40}
 41#define __div64_32 __div64_32
 42
 43#if !defined(CONFIG_AEABI)
 44
 45/*
 46 * In OABI configurations, some uses of the do_div function
 47 * cause gcc to run out of registers. To work around that,
 48 * we can force the use of the out-of-line version for
 49 * configurations that build a OABI kernel.
 50 */
 51#define do_div(n, base) __div64_32(&(n), base)
 52
 53#else
 54
 55static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
 
 
 
 
 
 56{
 57	unsigned long long res;
 58	register unsigned int tmp asm("ip") = 0;
 
 
 59
 60	if (!bias) {
 61		asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"
 62			"mov	%Q0, #0"
 63			: "=&r" (res)
 64			: "r" (m), "r" (n)
 65			: "cc");
 66	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
 67		res = m;
 68		asm (	"umlal	%Q0, %R0, %Q1, %Q2\n\t"
 69			"mov	%Q0, #0"
 70			: "+&r" (res)
 71			: "r" (m), "r" (n)
 72			: "cc");
 73	} else {
 74		asm (	"umull	%Q0, %R0, %Q2, %Q3\n\t"
 75			"cmn	%Q0, %Q2\n\t"
 76			"adcs	%R0, %R0, %R2\n\t"
 77			"adc	%Q0, %1, #0"
 78			: "=&r" (res), "+&r" (tmp)
 79			: "r" (m), "r" (n)
 80			: "cc");
 81	}
 82
 83	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
 84		asm (	"umlal	%R0, %Q0, %R1, %Q2\n\t"
 85			"umlal	%R0, %Q0, %Q1, %R2\n\t"
 86			"mov	%R0, #0\n\t"
 87			"umlal	%Q0, %R0, %R1, %R2"
 88			: "+&r" (res)
 89			: "r" (m), "r" (n)
 90			: "cc");
 91	} else {
 92		asm (	"umlal	%R0, %Q0, %R2, %Q3\n\t"
 93			"umlal	%R0, %1, %Q2, %R3\n\t"
 94			"mov	%R0, #0\n\t"
 95			"adds	%Q0, %1, %Q0\n\t"
 96			"adc	%R0, %R0, #0\n\t"
 97			"umlal	%Q0, %R0, %R2, %R3"
 98			: "+&r" (res), "+&r" (tmp)
 99			: "r" (m), "r" (n)
100			: "cc");
101	}
102
103	return res;
104}
105#define __arch_xprod_64 __arch_xprod_64
106
107#include <asm-generic/div64.h>
108
109#endif
110
111#endif
Second copy: the same header as of kernel v6.13.7 (differs only in how __arch_xprod_64 selects its carry-free path).
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef __ASM_ARM_DIV64
  3#define __ASM_ARM_DIV64
  4
  5#include <linux/types.h>
  6#include <asm/compiler.h>
  7
  8/*
  9 * The semantics of __div64_32() are:
 10 *
 11 * uint32_t __div64_32(uint64_t *n, uint32_t base)
 12 * {
 13 * 	uint32_t remainder = *n % base;
 14 * 	*n = *n / base;
 15 * 	return remainder;
 16 * }
 17 *
 18 * In other words, a 64-bit dividend with a 32-bit divisor producing
 19 * a 64-bit result and a 32-bit remainder.  To accomplish this optimally
 20 * we override the generic version in lib/div64.c to call our __do_div64
 21 * assembly implementation with completely non standard calling convention
 22 * for arguments and results (beware).
 23 */
 24static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
 25{
 26	register unsigned int __base      asm("r4") = base;
 27	register unsigned long long __n   asm("r0") = *n;
 28	register unsigned long long __res asm("r2");
 29	unsigned int __rem;
 30	asm(	__asmeq("%0", "r0")
 31		__asmeq("%1", "r2")
 32		__asmeq("%2", "r4")
 33		"bl	__do_div64"
 34		: "+r" (__n), "=r" (__res)
 35		: "r" (__base)
 36		: "ip", "lr", "cc");
 37	__rem = __n >> 32;
 38	*n = __res;
 39	return __rem;
 40}
 41#define __div64_32 __div64_32
 42
 43#if !defined(CONFIG_AEABI)
 44
 45/*
 46 * In OABI configurations, some uses of the do_div function
 47 * cause gcc to run out of registers. To work around that,
 48 * we can force the use of the out-of-line version for
 49 * configurations that build a OABI kernel.
 50 */
 51#define do_div(n, base) __div64_32(&(n), base)
 52
 53#else
 54
 55#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
 56static __always_inline
 57#else
 58static inline
 59#endif
 60uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
 61{
 62	unsigned long long res;
 63	register unsigned int tmp asm("ip") = 0;
 64	bool no_ovf = __builtin_constant_p(m) &&
 65		      ((m >> 32) + (m & 0xffffffff) < 0x100000000);
 66
 67	if (!bias) {
 68		asm (	"umull	%Q0, %R0, %Q1, %Q2\n\t"
 69			"mov	%Q0, #0"
 70			: "=&r" (res)
 71			: "r" (m), "r" (n)
 72			: "cc");
 73	} else if (no_ovf) {
 74		res = m;
 75		asm (	"umlal	%Q0, %R0, %Q1, %Q2\n\t"
 76			"mov	%Q0, #0"
 77			: "+&r" (res)
 78			: "r" (m), "r" (n)
 79			: "cc");
 80	} else {
 81		asm (	"umull	%Q0, %R0, %Q2, %Q3\n\t"
 82			"cmn	%Q0, %Q2\n\t"
 83			"adcs	%R0, %R0, %R2\n\t"
 84			"adc	%Q0, %1, #0"
 85			: "=&r" (res), "+&r" (tmp)
 86			: "r" (m), "r" (n)
 87			: "cc");
 88	}
 89
 90	if (no_ovf) {
 91		asm (	"umlal	%R0, %Q0, %R1, %Q2\n\t"
 92			"umlal	%R0, %Q0, %Q1, %R2\n\t"
 93			"mov	%R0, #0\n\t"
 94			"umlal	%Q0, %R0, %R1, %R2"
 95			: "+&r" (res)
 96			: "r" (m), "r" (n)
 97			: "cc");
 98	} else {
 99		asm (	"umlal	%R0, %Q0, %R2, %Q3\n\t"
100			"umlal	%R0, %1, %Q2, %R3\n\t"
101			"mov	%R0, #0\n\t"
102			"adds	%Q0, %1, %Q0\n\t"
103			"adc	%R0, %R0, #0\n\t"
104			"umlal	%Q0, %R0, %R2, %R3"
105			: "+&r" (res), "+&r" (tmp)
106			: "r" (m), "r" (n)
107			: "cc");
108	}
109
110	return res;
111}
112#define __arch_xprod_64 __arch_xprod_64
113
114#include <asm-generic/div64.h>
115
116#endif
117
118#endif