/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  mul_Xsig.S                                                               |
 |                                                                           |
 |  Multiply a 12 byte fixed point number by another fixed point number.     |
 |                                                                           |
 | Copyright (C) 1992,1994,1995                                              |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Call from C as:                                                           |
 |   void mul32_Xsig(Xsig *x, unsigned b)                                    |
 |                                                                           |
 |   void mul64_Xsig(Xsig *x, unsigned long long *b)                         |
 |                                                                           |
 |   void mul_Xsig_Xsig(Xsig *x, Xsig *b)                                    |
 |                                                                           |
 | The result is neither rounded nor normalized, and the ls bit or so may    |
 | be wrong.                                                                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
	.file "mul_Xsig.S"


#include "fpu_emu.h"
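
/*
 * The Xsig type comes from fpu_emu.h.  It holds a 96 bit (12 byte)
 * significand as three 32 bit words, least significant first; roughly
 * (a sketch, not a verbatim copy of the header):
 *
 *	typedef struct {
 *		unsigned long lsw;	// least significant word
 *		unsigned long midw;	// middle word
 *		unsigned long msw;	// most significant word
 *	} Xsig;
 *
 * Each routine below replaces *x with the most significant 96 bits of
 * the full product, i.e. the product shifted right by the width of the
 * multiplier (32, 64 or 96 bits).
 */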

.text
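/*
 * A rough C equivalent of mul32_Xsig, for illustration only (this is
 * not the code the kernel builds, and the _sketch name is made up;
 * field names follow the Xsig sketch above).  x is replaced by the
 * high 96 bits of the 128 bit product x * b, an exact truncation:
 *
 *	void mul32_Xsig_sketch(Xsig *x, unsigned b)
 *	{
 *		unsigned long long p0 = (unsigned long long)x->lsw  * b;
 *		unsigned long long p1 = (unsigned long long)x->midw * b;
 *		unsigned long long p2 = (unsigned long long)x->msw  * b;
 *		// accumulate bits 32..127 of the product, word by word
 *		unsigned long long w0 = (p0 >> 32) + (unsigned)p1;
 *		unsigned long long w1 = (w0 >> 32) + (p1 >> 32) + (unsigned)p2;
 *		unsigned long long w2 = (w1 >> 32) + (p2 >> 32);
 *		x->lsw  = (unsigned)w0;
 *		x->midw = (unsigned)w1;
 *		x->msw  = (unsigned)w2;
 *	}
 */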
SYM_FUNC_START(mul32_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	movl (%esi),%eax	/* lsl of Xsig */
	mull %ecx		/* b */
	movl %edx,-12(%ebp)	/* bits 0..31 of the product are shifted out */

	movl 4(%esi),%eax	/* midl of Xsig */
	mull %ecx		/* b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull %ecx		/* b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	RET
SYM_FUNC_END(mul32_Xsig)


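/*
 * A rough C equivalent of mul64_Xsig, for illustration only (the
 * _sketch name is made up).  x is replaced by bits 64..159 of the
 * 160 bit product x * b.  Like the assembly, it never forms
 * lsw(x) * low32(b), so a carry can be lost and the ls bit of the
 * result may be off:
 *
 *	void mul64_Xsig_sketch(Xsig *x, const unsigned long long *b)
 *	{
 *		unsigned blo = (unsigned)*b, bhi = (unsigned)(*b >> 32);
 *		unsigned long long p_lh = (unsigned long long)x->lsw  * bhi;
 *		unsigned long long p_ml = (unsigned long long)x->midw * blo;
 *		unsigned long long p_mh = (unsigned long long)x->midw * bhi;
 *		unsigned long long p_hl = (unsigned long long)x->msw  * blo;
 *		unsigned long long p_hh = (unsigned long long)x->msw  * bhi;
 *		// word 0 = bits 64..95, word 1 = bits 96..127,
 *		// word 2 = bits 128..159 of the product
 *		unsigned long long w0 = (p_lh >> 32) + (p_ml >> 32)
 *				      + (unsigned)p_mh + (unsigned)p_hl;
 *		unsigned long long w1 = (w0 >> 32) + (p_mh >> 32)
 *				      + (p_hl >> 32) + (unsigned)p_hh;
 *		unsigned long long w2 = (w1 >> 32) + (p_hh >> 32);
 *		x->lsw  = (unsigned)w0;
 *		x->midw = (unsigned)w1;
 *		x->msw  = (unsigned)w2;
 *	}
 */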
SYM_FUNC_START(mul64_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	/* Note: lsl of Xsig * lsl of b is never computed; only its
	   carry could reach the result, hence the ls bit may be off */
	movl (%esi),%eax	/* lsl of Xsig */
	mull 4(%ecx)		/* msl of b */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%eax
	movl %eax,(%esi)
	movl -8(%ebp),%eax
	movl %eax,4(%esi)
	movl -4(%ebp),%eax
	movl %eax,8(%esi)

	popl %esi
	leave
	RET
SYM_FUNC_END(mul64_Xsig)


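/*
 * A rough C equivalent of mul_Xsig_Xsig, for illustration only (the
 * _sketch name is made up).  x is replaced by bits 96..191 of the
 * 192 bit product x * b.  Like the assembly, the three lowest partial
 * products (lsw*lsw, lsw*midw, midw*lsw) are never formed, so the ls
 * bit of the result may be off:
 *
 *	void mul_Xsig_Xsig_sketch(Xsig *x, const Xsig *b)
 *	{
 *		unsigned long long p_lh = (unsigned long long)x->lsw  * b->msw;
 *		unsigned long long p_mm = (unsigned long long)x->midw * b->midw;
 *		unsigned long long p_hl = (unsigned long long)x->msw  * b->lsw;
 *		unsigned long long p_mh = (unsigned long long)x->midw * b->msw;
 *		unsigned long long p_hm = (unsigned long long)x->msw  * b->midw;
 *		unsigned long long p_hh = (unsigned long long)x->msw  * b->msw;
 *		// word 0 = bits 96..127, word 1 = bits 128..159,
 *		// word 2 = bits 160..191 of the product
 *		unsigned long long w0 = (p_lh >> 32) + (p_mm >> 32) + (p_hl >> 32)
 *				      + (unsigned)p_mh + (unsigned)p_hm;
 *		unsigned long long w1 = (w0 >> 32) + (p_mh >> 32)
 *				      + (p_hm >> 32) + (unsigned)p_hh;
 *		unsigned long long w2 = (w1 >> 32) + (p_hh >> 32);
 *		x->lsw  = (unsigned)w0;
 *		x->midw = (unsigned)w1;
 *		x->msw  = (unsigned)w2;
 *	}
 */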
SYM_FUNC_START(mul_Xsig_Xsig)
	pushl %ebp
	movl %esp,%ebp
	subl $16,%esp
	pushl %esi

	movl PARAM1,%esi
	movl PARAM2,%ecx

	xor %eax,%eax
	movl %eax,-4(%ebp)
	movl %eax,-8(%ebp)

	/* Note: the three lowest partial products are never computed;
	   only their carries could reach the result, hence the ls bit
	   may be off */
	movl (%esi),%eax	/* lsl of Xsig */
	mull 8(%ecx)		/* msl of b */
	movl %edx,-12(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull (%ecx)		/* lsl of b */
	addl %edx,-12(%ebp)
	adcl $0,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 4(%esi),%eax	/* midl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 4(%ecx)		/* midl of b */
	addl %eax,-12(%ebp)
	adcl %edx,-8(%ebp)
	adcl $0,-4(%ebp)

	movl 8(%esi),%eax	/* msl of Xsig */
	mull 8(%ecx)		/* msl of b */
	addl %eax,-8(%ebp)
	adcl %edx,-4(%ebp)

	movl -12(%ebp),%edx
	movl %edx,(%esi)
	movl -8(%ebp),%edx
	movl %edx,4(%esi)
	movl -4(%ebp),%edx
	movl %edx,8(%esi)

	popl %esi
	leave
	RET
SYM_FUNC_END(mul_Xsig_Xsig)