/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12 byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12 byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (nr of bits).                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
19 .file "round_Xsig.S"
20
21#include "fpu_emu.h"
22
23
24.text
/*
 * int round_Xsig(Xsig *n)
 *
 * Normalize the 96-bit significand *n (three little-endian longwords:
 * ls at (n), ms at 8(n)), then round the upper 64 bits to nearest-up
 * using bit 31 of the low longword as the round bit.
 *
 * ABI: i386 cdecl; PARAM1 is the first stack argument (see fpu_emu.h).
 * Returns in %eax the shift applied, as a NEGATIVE count of bits
 * shifted left, incremented by one if the rounding carried out of the
 * top bit.  Clobbers %eax, %ecx, %edx, flags.
 *
 * Register roles: %esi = n; %edx:%ebx:%eax = the 96-bit value (ms..ls);
 * -4(%ebp) = accumulated shift count (the return value).
 */
ENTRY(round_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space: this slot is the
				   -4(%ebp) shift-count local */
	pushl	%ebx		/* preserve callee-saved %ebx */
	pushl	%esi		/* preserve callee-saved %esi */

	movl	PARAM1,%esi	/* %esi = n */

	movl	8(%esi),%edx	/* most-significant longword */
	movl	4(%esi),%ebx
	movl	(%esi),%eax	/* least-significant longword */

	movl	$0,-4(%ebp)	/* shift count starts at zero */

	orl	%edx,%edx	/* ms bits */
	js	L_round		/* Already normalized */
	jnz	L_shift_1	/* Shift left 1 - 31 bits */

	/* Top longword is zero: shift up by a whole word first. */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)
	/* NOTE(review): falls through.  If %edx is still zero here the
	   bsrl below has an undefined result — presumably callers
	   guarantee a set bit within the upper 64 bits; confirm. */

/* We need to shift left by 1 - 31 bits */
L_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx		/* %ecx = 31 - index of ms set bit */
	subl	%ecx,-4(%ebp)	/* count -= shift (count goes negative) */
	shld	%cl,%ebx,%edx	/* 96-bit left shift across %edx:%ebx:%eax */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_round:
	testl	$0x80000000,%eax	/* round bit = bit 31 of low word */
	jz	L_exit

	addl	$1,%ebx		/* round the upper 64 bits upwards */
	adcl	$0,%edx
	jnz	L_exit		/* no carry out of bit 95: done */

	/* Rounding carried all the way out: value wrapped to zero.
	   Re-normalize to 0x80000000:00000000:... and record the extra
	   binary place in the shift count. */
	movl	$0x80000000,%edx
	incl	-4(%ebp)

L_exit:
	movl	%edx,8(%esi)	/* store the normalized, rounded value */
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax	/* return value = shift count */

	popl	%esi
	popl	%ebx
	leave
	ret
81
82
83
84
/*
 * int norm_Xsig(Xsig *n)
 *
 * Normalize the 96-bit significand *n in place, without rounding.
 * At most two whole-word shifts plus one 1-31 bit shift are applied;
 * if the top 64 bits are zero the function stops after shifting up by
 * 64 bits even if the result is still not normalized (see the jmp to
 * L_n_exit below), so an all-zero input stays zero and returns -64.
 *
 * ABI: i386 cdecl; PARAM1 is the first stack argument (see fpu_emu.h).
 * Returns in %eax minus the number of bits shifted left.
 *
 * Register roles: %esi = n; %edx:%ebx:%eax = the 96-bit value (ms..ls);
 * -4(%ebp) = accumulated shift count (the return value).
 */
ENTRY(norm_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space: this slot is the
				   -4(%ebp) shift-count local */
	pushl	%ebx		/* preserve callee-saved %ebx */
	pushl	%esi		/* preserve callee-saved %esi */

	movl	PARAM1,%esi	/* %esi = n */

	movl	8(%esi),%edx	/* most-significant longword */
	movl	4(%esi),%ebx
	movl	(%esi),%eax	/* least-significant longword */

	movl	$0,-4(%ebp)	/* shift count starts at zero */

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Already normalized */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	/* Top longword zero: shift up by one whole word. */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Normalized now */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	/* Second longword also zero: shift up one more word and stop. */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	addl	$-32,-4(%ebp)
	jmp	L_n_exit	/* Might not be normalized,
				   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx		/* %ecx = 31 - index of ms set bit */
	subl	%ecx,-4(%ebp)	/* count -= shift (count goes negative) */
	shld	%cl,%ebx,%edx	/* 96-bit left shift across %edx:%ebx:%eax */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_n_exit:
	movl	%edx,8(%esi)	/* write back the normalized value */
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax	/* return value = shift count */

	popl	%esi
	popl	%ebx
	leave
	ret
141
/* SPDX-License-Identifier: GPL-2.0 */
/*---------------------------------------------------------------------------+
 |  round_Xsig.S                                                             |
 |                                                                           |
 | Copyright (C) 1992,1993,1994,1995                                         |
 |                       W. Metzenthen, 22 Parker St, Ormond, Vic 3163,      |
 |                       Australia.  E-mail billm@jacobi.maths.monash.edu.au |
 |                                                                           |
 | Normalize and round a 12 byte quantity.                                   |
 | Call from C as:                                                           |
 |   int round_Xsig(Xsig *n)                                                 |
 |                                                                           |
 | Normalize a 12 byte quantity.                                             |
 | Call from C as:                                                           |
 |   int norm_Xsig(Xsig *n)                                                  |
 |                                                                           |
 | Each function returns the size of the shift (nr of bits).                 |
 |                                                                           |
 +---------------------------------------------------------------------------*/
20 .file "round_Xsig.S"
21
22#include "fpu_emu.h"
23
24
25.text
/*
 * int round_Xsig(Xsig *n)
 *
 * Normalize the 96-bit significand *n (three little-endian longwords:
 * ls at (n), ms at 8(n)), then round the upper 64 bits to nearest-up
 * using bit 31 of the low longword as the round bit.
 *
 * ABI: i386 cdecl; PARAM1 is the first stack argument (see fpu_emu.h).
 * Returns in %eax the shift applied, as a NEGATIVE count of bits
 * shifted left, incremented by one if the rounding carried out of the
 * top bit.  Clobbers %eax, %ecx, %edx, flags.
 *
 * Register roles: %esi = n; %edx:%ebx:%eax = the 96-bit value (ms..ls);
 * -4(%ebp) = accumulated shift count (the return value).
 */
SYM_FUNC_START(round_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space: this slot is the
				   -4(%ebp) shift-count local */
	pushl	%ebx		/* preserve callee-saved %ebx */
	pushl	%esi		/* preserve callee-saved %esi */

	movl	PARAM1,%esi	/* %esi = n */

	movl	8(%esi),%edx	/* most-significant longword */
	movl	4(%esi),%ebx
	movl	(%esi),%eax	/* least-significant longword */

	movl	$0,-4(%ebp)	/* shift count starts at zero */

	orl	%edx,%edx	/* ms bits */
	js	L_round		/* Already normalized */
	jnz	L_shift_1	/* Shift left 1 - 31 bits */

	/* Top longword is zero: shift up by a whole word first. */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)
	/* NOTE(review): falls through.  If %edx is still zero here the
	   bsrl below has an undefined result — presumably callers
	   guarantee a set bit within the upper 64 bits; confirm. */

/* We need to shift left by 1 - 31 bits */
L_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx		/* %ecx = 31 - index of ms set bit */
	subl	%ecx,-4(%ebp)	/* count -= shift (count goes negative) */
	shld	%cl,%ebx,%edx	/* 96-bit left shift across %edx:%ebx:%eax */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_round:
	testl	$0x80000000,%eax	/* round bit = bit 31 of low word */
	jz	L_exit

	addl	$1,%ebx		/* round the upper 64 bits upwards */
	adcl	$0,%edx
	jnz	L_exit		/* no carry out of bit 95: done */

	/* Rounding carried all the way out: value wrapped to zero.
	   Re-normalize to 0x80000000:00000000:... and record the extra
	   binary place in the shift count. */
	movl	$0x80000000,%edx
	incl	-4(%ebp)

L_exit:
	movl	%edx,8(%esi)	/* store the normalized, rounded value */
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax	/* return value = shift count */

	popl	%esi
	popl	%ebx
	leave
	RET
SYM_FUNC_END(round_Xsig)
83
84
85
/*
 * int norm_Xsig(Xsig *n)
 *
 * Normalize the 96-bit significand *n in place, without rounding.
 * At most two whole-word shifts plus one 1-31 bit shift are applied;
 * if the top 64 bits are zero the function stops after shifting up by
 * 64 bits even if the result is still not normalized (see the jmp to
 * L_n_exit below), so an all-zero input stays zero and returns -64.
 *
 * ABI: i386 cdecl; PARAM1 is the first stack argument (see fpu_emu.h).
 * Returns in %eax minus the number of bits shifted left.
 *
 * Register roles: %esi = n; %edx:%ebx:%eax = the 96-bit value (ms..ls);
 * -4(%ebp) = accumulated shift count (the return value).
 */
SYM_FUNC_START(norm_Xsig)
	pushl	%ebp
	movl	%esp,%ebp
	pushl	%ebx		/* Reserve some space: this slot is the
				   -4(%ebp) shift-count local */
	pushl	%ebx		/* preserve callee-saved %ebx */
	pushl	%esi		/* preserve callee-saved %esi */

	movl	PARAM1,%esi	/* %esi = n */

	movl	8(%esi),%edx	/* most-significant longword */
	movl	4(%esi),%ebx
	movl	(%esi),%eax	/* least-significant longword */

	movl	$0,-4(%ebp)	/* shift count starts at zero */

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Already normalized */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	/* Top longword zero: shift up by one whole word. */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	movl	$-32,-4(%ebp)

	orl	%edx,%edx	/* ms bits */
	js	L_n_exit	/* Normalized now */
	jnz	L_n_shift_1	/* Shift left 1 - 31 bits */

	/* Second longword also zero: shift up one more word and stop. */
	movl	%ebx,%edx
	movl	%eax,%ebx
	xorl	%eax,%eax
	addl	$-32,-4(%ebp)
	jmp	L_n_exit	/* Might not be normalized,
				   but shift no more. */

/* We need to shift left by 1 - 31 bits */
L_n_shift_1:
	bsrl	%edx,%ecx	/* get the required shift in %ecx */
	subl	$31,%ecx
	negl	%ecx		/* %ecx = 31 - index of ms set bit */
	subl	%ecx,-4(%ebp)	/* count -= shift (count goes negative) */
	shld	%cl,%ebx,%edx	/* 96-bit left shift across %edx:%ebx:%eax */
	shld	%cl,%eax,%ebx
	shl	%cl,%eax

L_n_exit:
	movl	%edx,8(%esi)	/* write back the normalized value */
	movl	%ebx,4(%esi)
	movl	%eax,(%esi)

	movl	-4(%ebp),%eax	/* return value = shift count */

	popl	%esi
	popl	%ebx
	leave
	RET
SYM_FUNC_END(norm_Xsig)