1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3
4 fp_arith.c: floating-point math routines for the Linux-m68k
5 floating point emulator.
6
7 Copyright (c) 1998-1999 David Huggins-Daines.
8
9 Somewhat based on the AlphaLinux floating point emulator, by David
10 Mosberger-Tang.
11
12 */
13
14#include "fp_emu.h"
15#include "multi_arith.h"
16#include "fp_arith.h"
17
/* Default quiet NaN: maximum exponent with every mantissa bit set. */
const struct fp_ext fp_QNaN =
{
	.exp = 0x7fff,
	.mant = { .m64 = ~0 }
};

/* Infinity: maximum exponent with a zero mantissa (sign field gives +/-). */
const struct fp_ext fp_Inf =
{
	.exp = 0x7fff,
};
28
29/* let's start with the easy ones */
30
31struct fp_ext *fp_fabs(struct fp_ext *dest, struct fp_ext *src)
32{
33 dprint(PINSTR, "fabs\n");
34
35 fp_monadic_check(dest, src);
36
37 dest->sign = 0;
38
39 return dest;
40}
41
42struct fp_ext *fp_fneg(struct fp_ext *dest, struct fp_ext *src)
43{
44 dprint(PINSTR, "fneg\n");
45
46 fp_monadic_check(dest, src);
47
48 dest->sign = !dest->sign;
49
50 return dest;
51}
52
53/* Now, the slightly harder ones */
54
55/* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
56 FDSUB, and FCMP instructions. */
57
struct fp_ext *fp_fadd(struct fp_ext *dest, struct fp_ext *src)
{
	int diff;

	dprint(PINSTR, "fadd\n");

	fp_dyadic_check(dest, src);

	if (IS_INF(dest)) {
		/* infinity - infinity == NaN */
		if (IS_INF(src) && (src->sign != dest->sign))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		fp_copy_ext(dest, src);
		return dest;
	}

	if (IS_ZERO(dest)) {
		if (IS_ZERO(src)) {
			/* (+0) + (-0): result is -0 only when rounding
			   towards minus infinity, +0 otherwise */
			if (src->sign != dest->sign) {
				if (FPDATA->rnd == FPCR_ROUND_RM)
					dest->sign = 1;
				else
					dest->sign = 0;
			}
		} else
			fp_copy_ext(dest, src);
		return dest;
	}

	dest->lowmant = src->lowmant = 0;

	/* align the mantissas: shift down the operand with the
	   smaller exponent */
	if ((diff = dest->exp - src->exp) > 0)
		fp_denormalize(src, diff);
	else if ((diff = -diff) > 0)
		fp_denormalize(dest, diff);

	if (dest->sign == src->sign) {
		/* same sign: add magnitudes; fp_addcarry() deals with
		   a carry out of the 64-bit mantissa */
		if (fp_addmant(dest, src))
			if (!fp_addcarry(dest))
				return dest;
	} else {
		/* opposite signs: subtract the smaller magnitude from
		   the larger; the result takes the larger's sign */
		if (dest->mant.m64 < src->mant.m64) {
			fp_submant(dest, src, dest);
			dest->sign = !dest->sign;
		} else
			fp_submant(dest, dest, src);
	}

	return dest;
}
111
112/* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
113 instructions.
114
115 Remember that the arguments are in assembler-syntax order! */
116
117struct fp_ext *fp_fsub(struct fp_ext *dest, struct fp_ext *src)
118{
119 dprint(PINSTR, "fsub ");
120
121 src->sign = !src->sign;
122 return fp_fadd(dest, src);
123}
124
125
126struct fp_ext *fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
127{
128 dprint(PINSTR, "fcmp ");
129
130 FPDATA->temp[1] = *dest;
131 src->sign = !src->sign;
132 return fp_fadd(&FPDATA->temp[1], src);
133}
134
135struct fp_ext *fp_ftst(struct fp_ext *dest, struct fp_ext *src)
136{
137 dprint(PINSTR, "ftst\n");
138
139 (void)dest;
140
141 return src;
142}
143
/* fp_fmul: Implements the kernel of the FMUL, FSMUL, and FDMUL
   instructions: dest = dest * src in extended precision. */
struct fp_ext *fp_fmul(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities: inf * 0 = NaN, inf * anything else = inf */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero. You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* sum of biased exponents minus (bias - 1); the extra one is
	   taken back by the normalizing shift below */
	exp = dest->exp + src->exp - 0x3ffe;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes the
	   shift of the result below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do a 64-bit multiply with expansion */
	fp_multiplymant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if ((long)temp.m32[0] > 0) {
		exp--;
		fp_putmant128(dest, &temp, 1);
	} else
		fp_putmant128(dest, &temp, 0);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		/* too small for a normal number: flag underflow and
		   deliver a denormalized result */
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
214
215/* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and
216 FSGLDIV instructions.
217
218 Note that the order of the operands is counter-intuitive: instead
219 of src / dest, the result is actually dest / src. */
220
struct fp_ext *fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fdiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	/* difference of biased exponents, rebiased */
	exp = dest->exp - src->exp + 0x3fff;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes lots
	   of things below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do the 64-bit divide */
	fp_dividemant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if (!temp.m32[0]) {
		exp--;
		fp_putmant128(dest, &temp, 32);
	} else
		fp_putmant128(dest, &temp, 31);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		/* flag underflow and deliver a denormalized result */
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
300
/* fp_fsglmul: Implements the kernel of the FSGLMUL instruction:
   single-precision multiply, using only the upper 24 bits of each
   mantissa. */
struct fp_ext *fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;

	dprint(PINSTR, "fsglmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities: inf * 0 = NaN, inf * anything else = inf */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero. You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

	/* do a 32-bit multiply on the mantissas truncated to the
	   single-precision field (top 24 bits) */
	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
		 dest->mant.m32[0] & 0xffffff00,
		 src->mant.m32[0] & 0xffffff00);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
356
/* fp_fsgldiv: Implements the kernel of the FSGLDIV instruction:
   single-precision divide (dest = dest / src). */
struct fp_ext *fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;
	unsigned long quot, rem;

	dprint(PINSTR, "fsgldiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

	/* truncate both mantissas to the single-precision field */
	dest->mant.m32[0] &= 0xffffff00;
	src->mant.m32[0] &= 0xffffff00;

	/* do the 32-bit divide */
	if (dest->mant.m32[0] >= src->mant.m32[0]) {
		/* quotient >= 1: subtract once, divide the remainder,
		   and restore the implied leading one in the top bit */
		fp_sub64(dest->mant, src->mant);
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
	} else {
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = quot;
		dest->mant.m32[1] = rem;	/* only for rounding */
		exp--;
	}

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
433
434/* fp_roundint: Internal rounding function for use by several of these
435 emulated instructions.
436
437 This one rounds off the fractional part using the rounding mode
438 specified. */
439
static void fp_roundint(struct fp_ext *dest, int mode)
{
	union fp_mant64 oldmant;
	unsigned long mask;

	if (!fp_normalize_ext(dest))
		return;

	/* infinities and zeroes */
	if (IS_INF(dest) || IS_ZERO(dest))
		return;

	/* first truncate the lower bits */
	oldmant = dest->mant;
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		/* |x| < 1: the integer part is zero */
		dest->mant.m64 = 0;
		break;
	case 0x3fff ... 0x401e:
		/* 1 <= |x| < 2^32: integer bits live in m32[0] */
		dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
		dest->mant.m32[1] = 0;
		if (oldmant.m64 == dest->mant.m64)
			return;
		break;
	case 0x401f ... 0x403e:
		/* 2^32 <= |x| < 2^64: fraction bits only in m32[1] */
		dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
		if (oldmant.m32[1] == dest->mant.m32[1])
			return;
		break;
	default:
		/* |x| >= 2^64: already integral */
		return;
	}
	/* some fraction bits were discarded above */
	fp_set_sr(FPSR_EXC_INEX2);

	/* We might want to normalize upwards here... however, since
	   we know that this is only called on the output of fp_fdiv,
	   or with the input to fp_fint or fp_fintrz, and the inputs
	   to all these functions are either normal or denormalized
	   (no subnormals allowed!), there's really no need.

	   In the case of fp_fdiv, observe that 0x80000000 / 0xffff =
	   0xffff8000, and the same holds for 128-bit / 64-bit. (i.e. the
	   smallest possible normal dividend and the largest possible normal
	   divisor will still produce a normal quotient, therefore, (normal
	   << 64) / normal is normal in all cases) */

	/* decide whether to fall through to the increment below, or to
	   keep the truncated value (return) */
	switch (mode) {
	case FPCR_ROUND_RN:
		switch (dest->exp) {
		case 0 ... 0x3ffd:
			return;
		case 0x3ffe:
			/* As noted above, the input is always normal, so the
			   guard bit (bit 63) is always set.  therefore, the
			   only case in which we will NOT round to 1.0 is when
			   the input is exactly 0.5. */
			if (oldmant.m64 == (1ULL << 63))
				return;
			break;
		case 0x3fff ... 0x401d:
			/* mask selects the rounding bit in m32[0] */
			mask = 1 << (0x401d - dest->exp);
			if (!(oldmant.m32[0] & mask))
				return;
			if (oldmant.m32[0] & (mask << 1))
				break;
			/* exactly halfway and even: keep truncation */
			if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
					!oldmant.m32[1])
				return;
			break;
		case 0x401e:
			if (oldmant.m32[1] & 0x80000000)
				return;
			if (oldmant.m32[0] & 1)
				break;
			if (!(oldmant.m32[1] << 1))
				return;
			break;
		case 0x401f ... 0x403d:
			/* mask selects the rounding bit in m32[1] */
			mask = 1 << (0x403d - dest->exp);
			if (!(oldmant.m32[1] & mask))
				return;
			if (oldmant.m32[1] & (mask << 1))
				break;
			if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
				return;
			break;
		default:
			return;
		}
		break;
	case FPCR_ROUND_RZ:
		return;
	default:
		/* RM or RP: round away from zero only when the sign
		   matches the rounding direction */
		if (dest->sign ^ (mode - FPCR_ROUND_RM))
			break;
		return;
	}

	/* add one unit in the last (integer) place, propagating any
	   mantissa carry into the exponent */
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->exp = 0x3fff;
		dest->mant.m64 = 1ULL << 63;
		break;
	case 0x3fff ... 0x401e:
		mask = 1 << (0x401e - dest->exp);
		if (dest->mant.m32[0] += mask)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	case 0x401f ... 0x403e:
		mask = 1 << (0x403e - dest->exp);
		if (dest->mant.m32[1] += mask)
			break;
		if (dest->mant.m32[0] += 1)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	}
}
561
562/* modrem_kernel: Implementation of the FREM and FMOD instructions
563 (which are exactly the same, except for the rounding used on the
564 intermediate value) */
565
static struct fp_ext *modrem_kernel(struct fp_ext *dest, struct fp_ext *src,
				    int mode)
{
	struct fp_ext tmp;

	fp_dyadic_check(dest, src);

	/* Infinities and zeros: inf REM x and x REM 0 are invalid */
	if (IS_INF(dest) || IS_ZERO(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_ZERO(dest) || IS_INF(src))
		return dest;

	/* FIXME: there is almost certainly a smarter way to do this */
	/* dest = dest - src * roundint(dest / src, mode) */
	fp_copy_ext(&tmp, dest);
	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */
	fp_roundint(&tmp, mode);
	fp_fmul(&tmp, src);
	fp_fsub(dest, &tmp);

	/* set the quotient byte */
	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
	return dest;
}
592
593/* fp_fmod: Implements the kernel of the FMOD instruction.
594
595 Again, the argument order is backwards. The result, as defined in
596 the Motorola manuals, is:
597
598 fmod(src,dest) = (dest - (src * floor(dest / src))) */
599
600struct fp_ext *fp_fmod(struct fp_ext *dest, struct fp_ext *src)
601{
602 dprint(PINSTR, "fmod\n");
603 return modrem_kernel(dest, src, FPCR_ROUND_RZ);
604}
605
606/* fp_frem: Implements the kernel of the FREM instruction.
607
608 frem(src,dest) = (dest - (src * round(dest / src)))
609 */
610
611struct fp_ext *fp_frem(struct fp_ext *dest, struct fp_ext *src)
612{
613 dprint(PINSTR, "frem\n");
614 return modrem_kernel(dest, src, FPCR_ROUND_RN);
615}
616
617struct fp_ext *fp_fint(struct fp_ext *dest, struct fp_ext *src)
618{
619 dprint(PINSTR, "fint\n");
620
621 fp_copy_ext(dest, src);
622
623 fp_roundint(dest, FPDATA->rnd);
624
625 return dest;
626}
627
628struct fp_ext *fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
629{
630 dprint(PINSTR, "fintrz\n");
631
632 fp_copy_ext(dest, src);
633
634 fp_roundint(dest, FPCR_ROUND_RZ);
635
636 return dest;
637}
638
/* fp_fscale: Implements the kernel of the FSCALE instruction: add the
   integer part of src to dest's exponent, i.e. dest *= 2^int(src). */
struct fp_ext *fp_fscale(struct fp_ext *dest, struct fp_ext *src)
{
	int scale, oldround;

	dprint(PINSTR, "fscale\n");

	fp_dyadic_check(dest, src);

	/* Infinities: scaling by infinity is invalid */
	if (IS_INF(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(dest))
		return dest;

	/* zeroes: scaling zero, or scaling by zero, changes nothing */
	if (IS_ZERO(src) || IS_ZERO(dest))
		return dest;

	/* Source exponent out of range (|src| >= 2^13) */
	if (src->exp >= 0x400c) {
		fp_set_ovrflw(dest);
		return dest;
	}

	/* src must be rounded with round to zero. */
	oldround = FPDATA->rnd;
	FPDATA->rnd = FPCR_ROUND_RZ;
	scale = fp_conv_ext2long(src);
	FPDATA->rnd = oldround;

	/* new exponent */
	scale += dest->exp;

	if (scale >= 0x7fff) {
		fp_set_ovrflw(dest);
	} else if (scale <= 0) {
		/* result denormal (or smaller): flag underflow */
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -scale);
	} else
		dest->exp = scale;

	return dest;
}
684
1/*
2
3 fp_arith.c: floating-point math routines for the Linux-m68k
4 floating point emulator.
5
6 Copyright (c) 1998-1999 David Huggins-Daines.
7
8 Somewhat based on the AlphaLinux floating point emulator, by David
9 Mosberger-Tang.
10
11 You may copy, modify, and redistribute this file under the terms of
12 the GNU General Public License, version 2, or any later version, at
13 your convenience.
14 */
15
16#include "fp_emu.h"
17#include "multi_arith.h"
18#include "fp_arith.h"
19
/* Default quiet NaN: maximum exponent with every mantissa bit set. */
const struct fp_ext fp_QNaN =
{
	.exp = 0x7fff,
	.mant = { .m64 = ~0 }
};

/* Infinity: maximum exponent with a zero mantissa (sign field gives +/-). */
const struct fp_ext fp_Inf =
{
	.exp = 0x7fff,
};
30
31/* let's start with the easy ones */
32
33struct fp_ext *
34fp_fabs(struct fp_ext *dest, struct fp_ext *src)
35{
36 dprint(PINSTR, "fabs\n");
37
38 fp_monadic_check(dest, src);
39
40 dest->sign = 0;
41
42 return dest;
43}
44
45struct fp_ext *
46fp_fneg(struct fp_ext *dest, struct fp_ext *src)
47{
48 dprint(PINSTR, "fneg\n");
49
50 fp_monadic_check(dest, src);
51
52 dest->sign = !dest->sign;
53
54 return dest;
55}
56
57/* Now, the slightly harder ones */
58
59/* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
60 FDSUB, and FCMP instructions. */
61
struct fp_ext *
fp_fadd(struct fp_ext *dest, struct fp_ext *src)
{
	int diff;

	dprint(PINSTR, "fadd\n");

	fp_dyadic_check(dest, src);

	if (IS_INF(dest)) {
		/* infinity - infinity == NaN */
		if (IS_INF(src) && (src->sign != dest->sign))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		fp_copy_ext(dest, src);
		return dest;
	}

	if (IS_ZERO(dest)) {
		if (IS_ZERO(src)) {
			/* (+0) + (-0): -0 only when rounding towards
			   minus infinity, +0 otherwise */
			if (src->sign != dest->sign) {
				if (FPDATA->rnd == FPCR_ROUND_RM)
					dest->sign = 1;
				else
					dest->sign = 0;
			}
		} else
			fp_copy_ext(dest, src);
		return dest;
	}

	dest->lowmant = src->lowmant = 0;

	/* align mantissas: shift down the operand with the smaller exponent */
	if ((diff = dest->exp - src->exp) > 0)
		fp_denormalize(src, diff);
	else if ((diff = -diff) > 0)
		fp_denormalize(dest, diff);

	if (dest->sign == src->sign) {
		/* same sign: magnitude add; fp_addcarry() handles carry out */
		if (fp_addmant(dest, src))
			if (!fp_addcarry(dest))
				return dest;
	} else {
		/* opposite signs: subtract smaller magnitude from larger;
		   the result takes the larger operand's sign */
		if (dest->mant.m64 < src->mant.m64) {
			fp_submant(dest, src, dest);
			dest->sign = !dest->sign;
		} else
			fp_submant(dest, dest, src);
	}

	return dest;
}
116
117/* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
118 instructions.
119
120 Remember that the arguments are in assembler-syntax order! */
121
122struct fp_ext *
123fp_fsub(struct fp_ext *dest, struct fp_ext *src)
124{
125 dprint(PINSTR, "fsub ");
126
127 src->sign = !src->sign;
128 return fp_fadd(dest, src);
129}
130
131
132struct fp_ext *
133fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
134{
135 dprint(PINSTR, "fcmp ");
136
137 FPDATA->temp[1] = *dest;
138 src->sign = !src->sign;
139 return fp_fadd(&FPDATA->temp[1], src);
140}
141
142struct fp_ext *
143fp_ftst(struct fp_ext *dest, struct fp_ext *src)
144{
145 dprint(PINSTR, "ftst\n");
146
147 (void)dest;
148
149 return src;
150}
151
/* fp_fmul: kernel of FMUL/FSMUL/FDMUL: dest = dest * src. */
struct fp_ext *
fp_fmul(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities: inf * 0 = NaN, inf * anything else = inf */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero. You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* sum of biased exponents minus (bias - 1); compensated by the
	   normalizing shift below */
	exp = dest->exp + src->exp - 0x3ffe;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes the
	   shift of the result below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do a 64-bit multiply with expansion */
	fp_multiplymant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if ((long)temp.m32[0] > 0) {
		exp--;
		fp_putmant128(dest, &temp, 1);
	} else
		fp_putmant128(dest, &temp, 0);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		/* underflow: deliver a denormalized result */
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
223
224/* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and
225 FSGLDIV instructions.
226
227 Note that the order of the operands is counter-intuitive: instead
228 of src / dest, the result is actually dest / src. */
229
struct fp_ext *
fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fdiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	/* difference of biased exponents, rebiased */
	exp = dest->exp - src->exp + 0x3fff;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes lots
	   of things below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do the 64-bit divide */
	fp_dividemant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if (!temp.m32[0]) {
		exp--;
		fp_putmant128(dest, &temp, 32);
	} else
		fp_putmant128(dest, &temp, 31);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		/* underflow: deliver a denormalized result */
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
310
/* fp_fsglmul: kernel of FSGLMUL: single-precision multiply using only
   the upper 24 bits of each mantissa. */
struct fp_ext *
fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;

	dprint(PINSTR, "fsglmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities: inf * 0 = NaN, inf * anything else = inf */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero. You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

	/* do a 32-bit multiply on the single-precision mantissa fields */
	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
		 dest->mant.m32[0] & 0xffffff00,
		 src->mant.m32[0] & 0xffffff00);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
367
/* fp_fsgldiv: kernel of FSGLDIV: single-precision divide (dest / src). */
struct fp_ext *
fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;
	unsigned long quot, rem;

	dprint(PINSTR, "fsgldiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

	/* truncate both mantissas to the single-precision field */
	dest->mant.m32[0] &= 0xffffff00;
	src->mant.m32[0] &= 0xffffff00;

	/* do the 32-bit divide */
	if (dest->mant.m32[0] >= src->mant.m32[0]) {
		/* quotient >= 1: subtract once, divide the remainder, and
		   restore the implied leading one in the top bit */
		fp_sub64(dest->mant, src->mant);
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
	} else {
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = quot;
		dest->mant.m32[1] = rem;	/* only for rounding */
		exp--;
	}

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}
445
446/* fp_roundint: Internal rounding function for use by several of these
447 emulated instructions.
448
449 This one rounds off the fractional part using the rounding mode
450 specified. */
451
452static void fp_roundint(struct fp_ext *dest, int mode)
453{
454 union fp_mant64 oldmant;
455 unsigned long mask;
456
457 if (!fp_normalize_ext(dest))
458 return;
459
460 /* infinities and zeroes */
461 if (IS_INF(dest) || IS_ZERO(dest))
462 return;
463
464 /* first truncate the lower bits */
465 oldmant = dest->mant;
466 switch (dest->exp) {
467 case 0 ... 0x3ffe:
468 dest->mant.m64 = 0;
469 break;
470 case 0x3fff ... 0x401e:
471 dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
472 dest->mant.m32[1] = 0;
473 if (oldmant.m64 == dest->mant.m64)
474 return;
475 break;
476 case 0x401f ... 0x403e:
477 dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
478 if (oldmant.m32[1] == dest->mant.m32[1])
479 return;
480 break;
481 default:
482 return;
483 }
484 fp_set_sr(FPSR_EXC_INEX2);
485
486 /* We might want to normalize upwards here... however, since
487 we know that this is only called on the output of fp_fdiv,
488 or with the input to fp_fint or fp_fintrz, and the inputs
489 to all these functions are either normal or denormalized
490 (no subnormals allowed!), there's really no need.
491
492 In the case of fp_fdiv, observe that 0x80000000 / 0xffff =
493 0xffff8000, and the same holds for 128-bit / 64-bit. (i.e. the
494 smallest possible normal dividend and the largest possible normal
495 divisor will still produce a normal quotient, therefore, (normal
496 << 64) / normal is normal in all cases) */
497
498 switch (mode) {
499 case FPCR_ROUND_RN:
500 switch (dest->exp) {
501 case 0 ... 0x3ffd:
502 return;
503 case 0x3ffe:
504 /* As noted above, the input is always normal, so the
505 guard bit (bit 63) is always set. therefore, the
506 only case in which we will NOT round to 1.0 is when
507 the input is exactly 0.5. */
508 if (oldmant.m64 == (1ULL << 63))
509 return;
510 break;
511 case 0x3fff ... 0x401d:
512 mask = 1 << (0x401d - dest->exp);
513 if (!(oldmant.m32[0] & mask))
514 return;
515 if (oldmant.m32[0] & (mask << 1))
516 break;
517 if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
518 !oldmant.m32[1])
519 return;
520 break;
521 case 0x401e:
522 if (!(oldmant.m32[1] >= 0))
523 return;
524 if (oldmant.m32[0] & 1)
525 break;
526 if (!(oldmant.m32[1] << 1))
527 return;
528 break;
529 case 0x401f ... 0x403d:
530 mask = 1 << (0x403d - dest->exp);
531 if (!(oldmant.m32[1] & mask))
532 return;
533 if (oldmant.m32[1] & (mask << 1))
534 break;
535 if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
536 return;
537 break;
538 default:
539 return;
540 }
541 break;
542 case FPCR_ROUND_RZ:
543 return;
544 default:
545 if (dest->sign ^ (mode - FPCR_ROUND_RM))
546 break;
547 return;
548 }
549
550 switch (dest->exp) {
551 case 0 ... 0x3ffe:
552 dest->exp = 0x3fff;
553 dest->mant.m64 = 1ULL << 63;
554 break;
555 case 0x3fff ... 0x401e:
556 mask = 1 << (0x401e - dest->exp);
557 if (dest->mant.m32[0] += mask)
558 break;
559 dest->mant.m32[0] = 0x80000000;
560 dest->exp++;
561 break;
562 case 0x401f ... 0x403e:
563 mask = 1 << (0x403e - dest->exp);
564 if (dest->mant.m32[1] += mask)
565 break;
566 if (dest->mant.m32[0] += 1)
567 break;
568 dest->mant.m32[0] = 0x80000000;
569 dest->exp++;
570 break;
571 }
572}
573
574/* modrem_kernel: Implementation of the FREM and FMOD instructions
575 (which are exactly the same, except for the rounding used on the
576 intermediate value) */
577
static struct fp_ext *
modrem_kernel(struct fp_ext *dest, struct fp_ext *src, int mode)
{
	struct fp_ext tmp;

	fp_dyadic_check(dest, src);

	/* Infinities and zeros: inf REM x and x REM 0 are invalid */
	if (IS_INF(dest) || IS_ZERO(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_ZERO(dest) || IS_INF(src))
		return dest;

	/* FIXME: there is almost certainly a smarter way to do this */
	/* dest = dest - src * roundint(dest / src, mode) */
	fp_copy_ext(&tmp, dest);
	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */
	fp_roundint(&tmp, mode);
	fp_fmul(&tmp, src);
	fp_fsub(dest, &tmp);

	/* set the quotient byte */
	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
	return dest;
}
604
605/* fp_fmod: Implements the kernel of the FMOD instruction.
606
607 Again, the argument order is backwards. The result, as defined in
608 the Motorola manuals, is:
609
610 fmod(src,dest) = (dest - (src * floor(dest / src))) */
611
612struct fp_ext *
613fp_fmod(struct fp_ext *dest, struct fp_ext *src)
614{
615 dprint(PINSTR, "fmod\n");
616 return modrem_kernel(dest, src, FPCR_ROUND_RZ);
617}
618
619/* fp_frem: Implements the kernel of the FREM instruction.
620
621 frem(src,dest) = (dest - (src * round(dest / src)))
622 */
623
624struct fp_ext *
625fp_frem(struct fp_ext *dest, struct fp_ext *src)
626{
627 dprint(PINSTR, "frem\n");
628 return modrem_kernel(dest, src, FPCR_ROUND_RN);
629}
630
631struct fp_ext *
632fp_fint(struct fp_ext *dest, struct fp_ext *src)
633{
634 dprint(PINSTR, "fint\n");
635
636 fp_copy_ext(dest, src);
637
638 fp_roundint(dest, FPDATA->rnd);
639
640 return dest;
641}
642
643struct fp_ext *
644fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
645{
646 dprint(PINSTR, "fintrz\n");
647
648 fp_copy_ext(dest, src);
649
650 fp_roundint(dest, FPCR_ROUND_RZ);
651
652 return dest;
653}
654
/* fp_fscale: kernel of FSCALE: add the integer part of src to dest's
   exponent, i.e. dest *= 2^int(src). */
struct fp_ext *
fp_fscale(struct fp_ext *dest, struct fp_ext *src)
{
	int scale, oldround;

	dprint(PINSTR, "fscale\n");

	fp_dyadic_check(dest, src);

	/* Infinities: scaling by infinity is invalid */
	if (IS_INF(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(dest))
		return dest;

	/* zeroes: scaling zero, or scaling by zero, changes nothing */
	if (IS_ZERO(src) || IS_ZERO(dest))
		return dest;

	/* Source exponent out of range (|src| >= 2^13) */
	if (src->exp >= 0x400c) {
		fp_set_ovrflw(dest);
		return dest;
	}

	/* src must be rounded with round to zero. */
	oldround = FPDATA->rnd;
	FPDATA->rnd = FPCR_ROUND_RZ;
	scale = fp_conv_ext2long(src);
	FPDATA->rnd = oldround;

	/* new exponent */
	scale += dest->exp;

	if (scale >= 0x7fff) {
		fp_set_ovrflw(dest);
	} else if (scale <= 0) {
		/* result denormal (or smaller): flag underflow */
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -scale);
	} else
		dest->exp = scale;

	return dest;
}
701