// SPDX-License-Identifier: GPL-2.0-or-later
/*

   fp_arith.c: floating-point math routines for the Linux-m68k
   floating point emulator.

   Copyright (c) 1998-1999 David Huggins-Daines.

   Somewhat based on the AlphaLinux floating point emulator, by David
   Mosberger-Tang.

 */

#include "fp_emu.h"
#include "multi_arith.h"
#include "fp_arith.h"

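/* Canonical special values in the emulator's internal extended format:
   exponent 0x7fff with an all-ones mantissa is the default quiet NaN,
   exponent 0x7fff with a zero mantissa is infinity. */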
const struct fp_ext fp_QNaN =
{
	.exp = 0x7fff,
	.mant = { .m64 = ~0 }
};

const struct fp_ext fp_Inf =
{
	.exp = 0x7fff,
};

/* let's start with the easy ones */

struct fp_ext *
fp_fabs(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fabs\n");

	fp_monadic_check(dest, src);

	dest->sign = 0;

	return dest;
}

struct fp_ext *
fp_fneg(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fneg\n");

	fp_monadic_check(dest, src);

	dest->sign = !dest->sign;

	return dest;
}

/* Now, the slightly harder ones */

/* fp_fadd: Implements the kernel of the FADD, FSADD, FDADD, FSUB,
   FDSUB, and FCMP instructions. */

struct fp_ext *
fp_fadd(struct fp_ext *dest, struct fp_ext *src)
{
	int diff;

	dprint(PINSTR, "fadd\n");

	fp_dyadic_check(dest, src);

	if (IS_INF(dest)) {
		/* infinity - infinity == NaN */
		if (IS_INF(src) && (src->sign != dest->sign))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		fp_copy_ext(dest, src);
		return dest;
	}

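	/* Zero operands: the sum of two zeroes of opposite sign is +0,
	   except in round-toward-minus-infinity mode where it is -0
	   (the IEEE 754 rule the code below follows); adding a zero to
	   anything else just copies the nonzero operand. */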
	if (IS_ZERO(dest)) {
		if (IS_ZERO(src)) {
			if (src->sign != dest->sign) {
				if (FPDATA->rnd == FPCR_ROUND_RM)
					dest->sign = 1;
				else
					dest->sign = 0;
			}
		} else
			fp_copy_ext(dest, src);
		return dest;
	}

	dest->lowmant = src->lowmant = 0;

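	/* Align the operands to the larger exponent: the mantissa of the
	   operand with the smaller exponent is shifted right by the
	   exponent difference (fp_denormalize), so both mantissas can be
	   combined at the same scale; lowmant catches the shifted-out
	   bits for rounding. */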
	if ((diff = dest->exp - src->exp) > 0)
		fp_denormalize(src, diff);
	else if ((diff = -diff) > 0)
		fp_denormalize(dest, diff);

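	/* Same signs: add the mantissas; if the addition carries out,
	   fp_addcarry() renormalizes the result (shifting right and
	   bumping the exponent).  Opposite signs: subtract the smaller
	   mantissa from the larger one and give the result the sign of
	   the larger. */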
	if (dest->sign == src->sign) {
		if (fp_addmant(dest, src))
			if (!fp_addcarry(dest))
				return dest;
	} else {
		if (dest->mant.m64 < src->mant.m64) {
			fp_submant(dest, src, dest);
			dest->sign = !dest->sign;
		} else
			fp_submant(dest, dest, src);
	}

	return dest;
}

/* fp_fsub: Implements the kernel of the FSUB, FSSUB, and FDSUB
   instructions.

   Remember that the arguments are in assembler-syntax order! */

struct fp_ext *
fp_fsub(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fsub ");

	src->sign = !src->sign;
	return fp_fadd(dest, src);
}


struct fp_ext *
fp_fcmp(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fcmp ");

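	/* FCMP only needs to set the condition codes, so the subtraction
	   is done into a scratch register (FPDATA->temp[1]) and the real
	   destination is left untouched. */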
	FPDATA->temp[1] = *dest;
	src->sign = !src->sign;
	return fp_fadd(&FPDATA->temp[1], src);
}

struct fp_ext *
fp_ftst(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "ftst\n");

	(void)dest;

	return src;
}

struct fp_ext *
fp_fmul(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero. You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

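	/* The biased exponents add; subtracting 0x3ffe (the bias 0x3fff
	   minus one) assumes the product of the two mantissas carries
	   into the top bit of the 128-bit result.  When it does not, the
	   normalization below shifts the product up one bit and takes
	   the extra 1 back off exp. */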
	exp = dest->exp + src->exp - 0x3ffe;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes the
	   shift of the result below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do a 64-bit multiply with expansion */
	fp_multiplymant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if ((long)temp.m32[0] > 0) {
		exp--;
		fp_putmant128(dest, &temp, 1);
	} else
		fp_putmant128(dest, &temp, 0);

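	/* Exponent range check: 0x7fff is reserved for infinities and
	   NaNs, so anything at or above it overflows; a negative biased
	   exponent means the result can only be represented denormalized,
	   which raises the underflow exception. */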
	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/* fp_fdiv: Implements the "kernel" of the FDIV, FSDIV, FDDIV and
   FSGLDIV instructions.

   Note that the order of the operands is counter-intuitive: instead
   of src / dest, the result is actually dest / src. */

struct fp_ext *
fp_fdiv(struct fp_ext *dest, struct fp_ext *src)
{
	union fp_mant128 temp;
	int exp;

	dprint(PINSTR, "fdiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

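	/* The biased exponents subtract and the bias is added back in.
	   The quotient of two normalized mantissas lies in (1/2, 2); when
	   it comes out below 1 (temp.m32[0] == 0 after the divide), the
	   exponent is decremented and the mantissa shifted up one extra
	   bit while being packed back into 64 bits. */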
	exp = dest->exp - src->exp + 0x3fff;

	/* shift up the mantissa for denormalized numbers,
	   so that the highest bit is set, this makes lots
	   of things below easier */
	if ((long)dest->mant.m32[0] >= 0)
		exp -= fp_overnormalize(dest);
	if ((long)src->mant.m32[0] >= 0)
		exp -= fp_overnormalize(src);

	/* now, do the 64-bit divide */
	fp_dividemant(&temp, dest, src);

	/* normalize it back to 64 bits and stuff it back into the
	   destination struct */
	if (!temp.m32[0]) {
		exp--;
		fp_putmant128(dest, &temp, 32);
	} else
		fp_putmant128(dest, &temp, 31);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

struct fp_ext *
fp_fsglmul(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;

	dprint(PINSTR, "fsglmul\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		if (IS_ZERO(src))
			fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(src)) {
		if (IS_ZERO(dest))
			fp_set_nan(dest);
		else
			fp_copy_ext(dest, src);
		return dest;
	}

	/* Of course, as we all know, zero * anything = zero. You may
	   not have known that it might be a positive or negative
	   zero... */
	if (IS_ZERO(dest) || IS_ZERO(src)) {
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	exp = dest->exp + src->exp - 0x3ffe;

	/* do a 32-bit multiply */
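	/* FSGLMUL works to single-precision accuracy: only the upper
	   24 bits of each mantissa take part, so the low byte of each
	   high word is masked off before the 32x32->64 multiply. */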
	fp_mul64(dest->mant.m32[0], dest->mant.m32[1],
		 dest->mant.m32[0] & 0xffffff00,
		 src->mant.m32[0] & 0xffffff00);

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

struct fp_ext *
fp_fsgldiv(struct fp_ext *dest, struct fp_ext *src)
{
	int exp;
	unsigned long quot, rem;

	dprint(PINSTR, "fsgldiv\n");

	fp_dyadic_check(dest, src);

	/* calculate the correct sign now, as it's necessary for infinities */
	dest->sign = src->sign ^ dest->sign;

	/* Handle infinities */
	if (IS_INF(dest)) {
		/* infinity / infinity = NaN (quiet, as always) */
		if (IS_INF(src))
			fp_set_nan(dest);
		/* infinity / anything else = infinity (with appropriate sign) */
		return dest;
	}
	if (IS_INF(src)) {
		/* anything / infinity = zero (with appropriate sign) */
		dest->exp = 0;
		dest->mant.m64 = 0;
		dest->lowmant = 0;

		return dest;
	}

	/* zeroes */
	if (IS_ZERO(dest)) {
		/* zero / zero = NaN */
		if (IS_ZERO(src))
			fp_set_nan(dest);
		/* zero / anything else = zero */
		return dest;
	}
	if (IS_ZERO(src)) {
		/* anything / zero = infinity (with appropriate sign) */
		fp_set_sr(FPSR_EXC_DZ);
		dest->exp = 0x7fff;
		dest->mant.m64 = 0;

		return dest;
	}

	exp = dest->exp - src->exp + 0x3fff;

	dest->mant.m32[0] &= 0xffffff00;
	src->mant.m32[0] &= 0xffffff00;

	/* do the 32-bit divide */
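	/* If the dividend's high word is not smaller than the divisor's,
	   the 32-bit quotient would overflow: subtract the divisor out
	   once, divide, and put the known leading 1 back in the top bit.
	   Otherwise the quotient is below 1, so divide directly and
	   decrement the exponent.  The low word only keeps the leftover
	   bits for rounding. */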
	if (dest->mant.m32[0] >= src->mant.m32[0]) {
		fp_sub64(dest->mant, src->mant);
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = 0x80000000 | (quot >> 1);
		dest->mant.m32[1] = (quot & 1) | rem;	/* only for rounding */
	} else {
		fp_div64(quot, rem, dest->mant.m32[0], 0, src->mant.m32[0]);
		dest->mant.m32[0] = quot;
		dest->mant.m32[1] = rem;	/* only for rounding */
		exp--;
	}

	if (exp >= 0x7fff) {
		fp_set_ovrflw(dest);
		return dest;
	}
	dest->exp = exp;
	if (exp < 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -exp);
	}

	return dest;
}

/* fp_roundint: Internal rounding function for use by several of these
   emulated instructions.

   This one rounds off the fractional part using the rounding mode
   specified. */

static void fp_roundint(struct fp_ext *dest, int mode)
{
	union fp_mant64 oldmant;
	unsigned long mask;

	if (!fp_normalize_ext(dest))
		return;

	/* infinities and zeroes */
	if (IS_INF(dest) || IS_ZERO(dest))
		return;

	/* first truncate the lower bits */
	oldmant = dest->mant;
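	/* The exponent selects where the binary point falls in the
	   64-bit mantissa: below 0x3fff the magnitude is less than 1 and
	   the whole mantissa is fractional; up to 0x401e the fraction
	   starts in the high word; up to 0x403e it is confined to the
	   low word; above that the value is already an integer. */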
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->mant.m64 = 0;
		break;
	case 0x3fff ... 0x401e:
		dest->mant.m32[0] &= 0xffffffffU << (0x401e - dest->exp);
		dest->mant.m32[1] = 0;
		if (oldmant.m64 == dest->mant.m64)
			return;
		break;
	case 0x401f ... 0x403e:
		dest->mant.m32[1] &= 0xffffffffU << (0x403e - dest->exp);
		if (oldmant.m32[1] == dest->mant.m32[1])
			return;
		break;
	default:
		return;
	}
	fp_set_sr(FPSR_EXC_INEX2);

	/* We might want to normalize upwards here... however, since
	   we know that this is only called on the output of fp_fdiv,
	   or with the input to fp_fint or fp_fintrz, and the inputs
	   to all these functions are either normal or denormalized
	   (no subnormals allowed!), there's really no need.

	   In the case of fp_fdiv, observe that 0x80000000 / 0xffff =
	   0xffff8000, and the same holds for 128-bit / 64-bit. (i.e. the
	   smallest possible normal dividend and the largest possible normal
	   divisor will still produce a normal quotient, therefore, (normal
	   << 64) / normal is normal in all cases) */

	switch (mode) {
	case FPCR_ROUND_RN:
		switch (dest->exp) {
		case 0 ... 0x3ffd:
			return;
		case 0x3ffe:
			/* As noted above, the input is always normal, so the
			   guard bit (bit 63) is always set.  Therefore, the
			   only case in which we will NOT round to 1.0 is when
			   the input is exactly 0.5. */
			if (oldmant.m64 == (1ULL << 63))
				return;
			break;
		case 0x3fff ... 0x401d:
			mask = 1 << (0x401d - dest->exp);
			if (!(oldmant.m32[0] & mask))
				return;
			if (oldmant.m32[0] & (mask << 1))
				break;
			if (!(oldmant.m32[0] << (dest->exp - 0x3ffd)) &&
			    !oldmant.m32[1])
				return;
			break;
		case 0x401e:
			if (oldmant.m32[1] & 0x80000000)
				return;
			if (oldmant.m32[0] & 1)
				break;
			if (!(oldmant.m32[1] << 1))
				return;
			break;
		case 0x401f ... 0x403d:
			mask = 1 << (0x403d - dest->exp);
			if (!(oldmant.m32[1] & mask))
				return;
			if (oldmant.m32[1] & (mask << 1))
				break;
			if (!(oldmant.m32[1] << (dest->exp - 0x401d)))
				return;
			break;
		default:
			return;
		}
		break;
	case FPCR_ROUND_RZ:
		return;
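	/* Only round-toward-minus-infinity (RM) and round-toward-plus-
	   infinity (RP) are left here, and they are adjacent mode values,
	   so (mode - FPCR_ROUND_RM) is 0 for RM and 1 for RP.  XORing
	   with the sign rounds the magnitude up only when the direction
	   matches the sign: RM with a negative value, RP with a positive
	   one. */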
	default:
		if (dest->sign ^ (mode - FPCR_ROUND_RM))
			break;
		return;
	}

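	/* Round away from zero: add one unit in the last integer place.
	   If the addition wraps a word to zero, the carry ripples into
	   the next word or, at the very top, the mantissa is reset to
	   1.0 (0x80000000...) and the exponent is incremented.  Values
	   below 1 that get here round to exactly 1.0. */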
	switch (dest->exp) {
	case 0 ... 0x3ffe:
		dest->exp = 0x3fff;
		dest->mant.m64 = 1ULL << 63;
		break;
	case 0x3fff ... 0x401e:
		mask = 1 << (0x401e - dest->exp);
		if (dest->mant.m32[0] += mask)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	case 0x401f ... 0x403e:
		mask = 1 << (0x403e - dest->exp);
		if (dest->mant.m32[1] += mask)
			break;
		if (dest->mant.m32[0] += 1)
			break;
		dest->mant.m32[0] = 0x80000000;
		dest->exp++;
		break;
	}
}

/* modrem_kernel: Implementation of the FREM and FMOD instructions
   (which are exactly the same, except for the rounding used on the
   intermediate value) */

static struct fp_ext *
modrem_kernel(struct fp_ext *dest, struct fp_ext *src, int mode)
{
	struct fp_ext tmp;

	fp_dyadic_check(dest, src);

	/* Infinities and zeros */
	if (IS_INF(dest) || IS_ZERO(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_ZERO(dest) || IS_INF(src))
		return dest;

	/* FIXME: there is almost certainly a smarter way to do this */
	fp_copy_ext(&tmp, dest);
	fp_fdiv(&tmp, src);		/* NOTE: src might be modified */
	fp_roundint(&tmp, mode);
	fp_fmul(&tmp, src);
	fp_fsub(dest, &tmp);

	/* set the quotient byte */
	fp_set_quotient((dest->mant.m64 & 0x7f) | (dest->sign << 7));
	return dest;
}

/* fp_fmod: Implements the kernel of the FMOD instruction.

   Again, the argument order is backwards.  The result, as defined in
   the Motorola manuals, is:

   fmod(src,dest) = (dest - (src * floor(dest / src))) */

struct fp_ext *
fp_fmod(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fmod\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RZ);
}

/* fp_frem: Implements the kernel of the FREM instruction.

   frem(src,dest) = (dest - (src * round(dest / src)))
 */

struct fp_ext *
fp_frem(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "frem\n");
	return modrem_kernel(dest, src, FPCR_ROUND_RN);
}

struct fp_ext *
fp_fint(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fint\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPDATA->rnd);

	return dest;
}

struct fp_ext *
fp_fintrz(struct fp_ext *dest, struct fp_ext *src)
{
	dprint(PINSTR, "fintrz\n");

	fp_copy_ext(dest, src);

	fp_roundint(dest, FPCR_ROUND_RZ);

	return dest;
}

struct fp_ext *
fp_fscale(struct fp_ext *dest, struct fp_ext *src)
{
	int scale, oldround;

	dprint(PINSTR, "fscale\n");

	fp_dyadic_check(dest, src);

	/* Infinities */
	if (IS_INF(src)) {
		fp_set_nan(dest);
		return dest;
	}
	if (IS_INF(dest))
		return dest;

	/* zeroes */
	if (IS_ZERO(src) || IS_ZERO(dest))
		return dest;

	/* Source exponent out of range */
	if (src->exp >= 0x400c) {
		fp_set_ovrflw(dest);
		return dest;
	}

	/* src must be rounded with round to zero. */
	oldround = FPDATA->rnd;
	FPDATA->rnd = FPCR_ROUND_RZ;
	scale = fp_conv_ext2long(src);
	FPDATA->rnd = oldround;

	/* new exponent */
	scale += dest->exp;

	if (scale >= 0x7fff) {
		fp_set_ovrflw(dest);
	} else if (scale <= 0) {
		fp_set_sr(FPSR_EXC_UNFL);
		fp_denormalize(dest, -scale);
	} else
		dest->exp = scale;

	return dest;
}