Open64 (mfef90, whirl2f, and IR tools)  TAG: version-openad; SVN changeset: 916
convert.c
Go to the documentation of this file.
00001 /*
00002 
00003   Copyright (C) 2000, 2001 Silicon Graphics, Inc.  All Rights Reserved.
00004 
00005   This program is free software; you can redistribute it and/or modify it
00006   under the terms of version 2 of the GNU General Public License as
00007   published by the Free Software Foundation.
00008 
00009   This program is distributed in the hope that it would be useful, but
00010   WITHOUT ANY WARRANTY; without even the implied warranty of
00011   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
00012 
00013   Further, this software is distributed without any warranty that it is
00014   free of the rightful claim of any third person regarding infringement 
00015   or the like.  Any license provided herein, whether implied or 
00016   otherwise, applies only to this software file.  Patent licenses, if 
00017   any, provided herein do not apply to combinations of this program with 
00018   other software, or any other product whatsoever.  
00019 
00020   You should have received a copy of the GNU General Public License along
00021   with this program; if not, write the Free Software Foundation, Inc., 59
00022   Temple Place - Suite 330, Boston MA 02111-1307, USA.
00023 
00024   Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pky,
00025   Mountain View, CA 94043, or:
00026 
00027   http://www.sgi.com
00028 
00029   For further information regarding this notice, see:
00030 
00031   http://oss.sgi.com/projects/GenInfo/NoticeExplan
00032 
00033 */
00034 
00035 
00036 /*
00037  *      Conversions between floating-point and two's-complement fixed
00038  *      point representations. All rounding is toward zero (i.e. chopped)
00039  *      for the Cray format conversions.
00040  */
00041 
00042 #include "arith.internal.h"
00043 #include "int64.h"
00044 
00045 #include <string.h> /* for memcpy() */
00046 
00047 /* Cray single -> integer */
/* Cray single -> integer */
/*
 * ar_cfix64 - convert a Cray single-precision (64-bit) float to a
 * 64-bit two's-complement integer, chopping toward zero.
 *
 *   fix     - out: result integer (four 16-bit parts, part1 most
 *             significant)
 *   flt     - in:  Cray 64-bit floating-point operand
 *   bitsize - logical width in bits of the target integer type
 *
 * Returns a bit mask of AR_STAT_* flags (ZERO, UNDERFLOW, OVERFLOW,
 * INEXACT, NEGATIVE, SEMIVALID).
 */
int
ar_cfix64 (AR_INT_64 *fix,
           const AR_CRAY_64 *flt,
           int bitsize) {

        int res = AR_STAT_OK, neg;
        /* Right-shift count that aligns the binary point; negative
         * values mean the coefficient must be shifted left instead. */
        int shift = AR_CRAY_EXPO_BIAS + AR_CRAY64_COEFF_BITS - flt->expo - 1;
        AR_CRAY_64 a = *flt;

        /* A zero coefficient is an exact zero regardless of exponent. */
        if (!(a.coeff0 | a.coeff1 | a.coeff2)) {
                ZERO64 (*fix);
                return AR_STAT_ZERO;
        }
        /* Entire coefficient would be shifted out: |value| < 1. */
        if (shift >= AR_CRAY64_COEFF_BITS) {
                ZERO64 (*fix);
                return AR_STAT_ZERO | AR_STAT_UNDERFLOW;
        }
        /* Value does not fit in 'bitsize' bits. */
        if (shift < AR_CRAY64_COEFF_BITS - bitsize)
                res |= AR_STAT_OVERFLOW;

        neg = a.sign;
        a.sign = 0;
        a.expo = 0;     /* leave only the raw coefficient bits */
        CRAY64TOINT64 (*fix, a);

        for ( ; shift < 0; shift++)
                SHLEFT64 (*fix);
        for ( ; shift > 0; shift--) {
                if (fix->part4 & 1)     /* low bit lost => inexact */
                        res |= AR_STAT_INEXACT;
                SHRIGHT64X (*fix);
        }

        /* Magnitude overflowed into the sign position. */
        if (fix->part1&0x8000)
                res |= (AR_STAT_OVERFLOW|AR_STAT_SEMIVALID);

        /* Two's-complement negate if the operand was negative. */
        if (neg) {
                NOT64 (*fix);
                INC64 (*fix);
        }

        if (!(fix->part1 | fix->part2 | fix->part3 | fix->part4))
                res |= AR_STAT_ZERO;
        else if (neg)
                res |= AR_STAT_NEGATIVE;

        return res;
}
00096 
00097 
00098 /* integer -> Cray single */
/* integer -> Cray single */
/*
 * ar_cflt64 - convert a 64-bit integer to Cray single-precision
 * floating point.  Low-order bits that do not fit in the coefficient
 * are chopped (and reported via AR_STAT_INEXACT).
 *
 *   flt         - out: Cray 64-bit floating-point result
 *   fix         - in:  64-bit integer operand
 *   is_unsigned - nonzero to treat 'fix' as unsigned
 *
 * Returns a bit mask of AR_STAT_* flags.
 */
int
ar_cflt64 (AR_CRAY_64 *flt,
           const AR_INT_64 *fix,
           int is_unsigned) {

        int res = AR_STAT_OK, neg = 0;
        /* Exponent assuming the binary point sits just right of the
         * coefficient; adjusted below while normalizing. */
        long expo = AR_CRAY_EXPO_BIAS + AR_CRAY64_COEFF_BITS - 1;
        AR_INT_64 val;

        if (!(fix->part1 | fix->part2 | fix->part3 | fix->part4)) {
                ZEROCRAY64 (*flt);
                return AR_STAT_ZERO;
        }

        COPY64 (val, *fix);
        if (SIGNBIT (*fix) && !is_unsigned) {
                NEG64 (val);    /* work with the magnitude */
                neg = 1;
                res |= AR_STAT_NEGATIVE;
        }

        /* Shift right until the value fits under the sign/exponent
         * field positions, chopping (and flagging) lost bits. */
        while (val.part1 >> (16 - (AR_CRAY_EXPO_BITS + 1))) {
                if (val.part4 & 1)
                        res |= AR_STAT_INEXACT;
                expo++;
                SHRIGHT64 (val);
        }

        INT64TOCRAY64 (*flt, val);
        /* Normalize: bring the leading 1 into the top coefficient bit. */
        while (!(flt->coeff0 >> (AR_CRAY_C0_BITS - 1))) {
                expo--;
                SHLEFTCRAY64 (*flt);
        }

        flt->sign = neg;
        flt->expo = expo;
        return res;
}
00137 
00138 
00139 /* Cray double -> integer */
/* Cray double -> integer */
/*
 * ar_cfix128 - convert a Cray double-precision (128-bit) float to a
 * 64-bit two's-complement integer, chopping toward zero.
 *
 *   fix     - out: result integer
 *   flt     - in:  Cray 128-bit floating-point operand
 *   bitsize - logical width in bits of the target integer type
 *
 * Returns a bit mask of AR_STAT_* flags.  Mirrors ar_cfix64 but must
 * also account for the low half of the coefficient (coeff3..coeff5).
 */
int
ar_cfix128 (AR_INT_64 *fix,
            const AR_CRAY_128 *flt,
            int bitsize) {

        int res = AR_STAT_OK, neg;
        /* Alignment shift, measured against the upper (64-bit wide)
         * part of the coefficient. */
        int shift = AR_CRAY_EXPO_BIAS + AR_CRAY64_COEFF_BITS - flt->expo - 1;
        AR_CRAY_128 a = *flt;
        AR_CRAY_64 b;

        if (!(a.coeff0 | a.coeff1 | a.coeff2 |
              a.coeff3 | a.coeff4 | a.coeff5)) {
                ZERO64 (*fix);
                return AR_STAT_ZERO;
        }
        /* |value| < 1 chops to zero. */
        if (shift >= AR_CRAY64_COEFF_BITS) {
                ZERO64 (*fix);
                return AR_STAT_ZERO | AR_STAT_UNDERFLOW;
        }
        if (shift < AR_CRAY64_COEFF_BITS - bitsize)
                res |= AR_STAT_OVERFLOW;

        neg = a.sign;
        a.sign = 0;
        a.expo = 0;
        /* Take the upper coefficient as the integer... */
        CRAY128TO64 (b, a);
        CRAY64TOINT64 (*fix, b);

        /* ...and shift in bits from the lower half as needed. */
        for ( ; shift < 0; shift++) {
                SHLEFT64 (*fix);
                SHLEFTCRAY128 (a);
                fix->part4 |= a.coeff2 & 1;
        }
        /* Any remaining low-half bits are lost => inexact. */
        if (a.coeff3 | a.coeff4 | a.coeff5)
                res |= AR_STAT_INEXACT;
        for ( ; shift > 0; shift--) {
                if (fix->part4 & 1)
                        res |= AR_STAT_INEXACT;
                SHRIGHT64X (*fix);
        }

        /* Magnitude overflowed into the sign position. */
        if (fix->part1&0x8000)
                res |= (AR_STAT_OVERFLOW|AR_STAT_SEMIVALID);

        if (neg) {
                NOT64 (*fix);
                INC64 (*fix);
        }

        if (!(fix->part1 | fix->part2 | fix->part3 | fix->part4))
                res |= AR_STAT_ZERO;
        else if (neg)
                res |= AR_STAT_NEGATIVE;

        return res;
}
00196 
00197 
00198 /* integer -> Cray double */
/* integer -> Cray double */
/*
 * ar_cflt128 - convert a 64-bit integer to Cray double-precision
 * floating point.  Bits shifted off the integer during alignment are
 * preserved in the first low-half coefficient word (coeff3), so the
 * conversion is exact for any 64-bit operand.
 *
 *   flt         - out: Cray 128-bit floating-point result
 *   fix         - in:  64-bit integer operand
 *   is_unsigned - nonzero to treat 'fix' as unsigned
 *
 * Returns a bit mask of AR_STAT_* flags.
 */
int
ar_cflt128 (AR_CRAY_128 *flt,
            const AR_INT_64 *fix,
            int is_unsigned) {

        int res = AR_STAT_OK, neg = 0;
        long expo = AR_CRAY_EXPO_BIAS + AR_CRAY64_COEFF_BITS - 1;
        AR_INT_64 val;
        AR_CRAY_64 sing;
        unsigned long chop = 0; /* bits shifted out of the integer */

        if (!(fix->part1 | fix->part2 | fix->part3 | fix->part4)) {
                ZEROCRAY128 (*flt);
                return AR_STAT_ZERO;
        }

        COPY64 (val, *fix);
        if (SIGNBIT (*fix) && !is_unsigned) {
                NEG64 (val);    /* work with the magnitude */
                neg = 1;
                res |= AR_STAT_NEGATIVE;
        }

        /* Align as in ar_cflt64, but capture the chopped bits. */
        while (val.part1 >> (16 - (AR_CRAY_EXPO_BITS + 1))) {
                chop = (chop >> 1) |
                       ((val.part4 & 1) << (AR_CRAY_C3_BITS - 1));
                expo++;
                SHRIGHT64 (val);
        }

        INT64TOCRAY64 (sing, val);
        CRAY64TO128 (*flt, sing);
        flt->coeff3 = chop;     /* re-attach the chopped bits */
        /* Normalize so the top coefficient bit is set. */
        while (!(flt->coeff0 >> (AR_CRAY_C0_BITS - 1))) {
                expo--;
                SHLEFTCRAY128 (*flt);
        }

        flt->sign = neg;
        flt->expo = expo;
        return res;
}
00241 
00242 
00243 /* IEEE 32 bit -> integer */
00244 int
00245 ar_ifix32 (AR_INT_64 *fix,
00246            const AR_IEEE_32 *flt,
00247            int bitsize,
00248            int roundmode) {
00249 
00250         int res = AR_STAT_OK, neg;
00251         int shift = AR_IEEE32_EXPO_BIAS + AR_IEEE32_COEFF_BITS - flt->expo;
00252         unsigned long rbits = 0;
00253         AR_IEEE_32 a = *flt;
00254 
00255         if (a.expo == 0 && !IS_IEEE32_NZ_COEFF(&a)) {
00256                 ZERO64 (*fix);
00257                 return AR_STAT_ZERO;
00258         }
00259         if (shift > AR_IEEE32_COEFF_BITS + AR_IEEE32_ROUND_BITS)
00260                 shift = AR_IEEE32_COEFF_BITS + AR_IEEE32_ROUND_BITS;
00261 
00262         if (shift < AR_IEEE32_COEFF_BITS + 1 - bitsize)
00263                 res |= AR_STAT_OVERFLOW;
00264 
00265         neg = a.sign;
00266         a.sign = 0;
00267         a.expo = !!a.expo;
00268         IEEE32TOINT64 (*fix, a);
00269 
00270         for ( ; shift < 0; shift++)
00271                 SHLEFT64 (*fix);
00272         for ( ; shift > 0; shift--) {
00273                 rbits = rbits & 1 | (rbits >> 1) |
00274                         ((fix->part4 & 1) << (AR_IEEE32_ROUND_BITS - 1));
00275                 SHRIGHT64 (*fix);
00276         }
00277         if (rbits)
00278                 res |= AR_STAT_INEXACT;
00279 
00280         switch (roundmode) {
00281         case AR_ROUND_PLUS_INFINITY:
00282                 if (!a.sign && rbits)
00283                         INC64 (*fix);
00284                 break;
00285         case AR_ROUND_MINUS_INFINITY:
00286                 if (a.sign && rbits)
00287                         DEC64 (*fix);
00288                 break;
00289         case AR_ROUND_ZERO:
00290                 break;
00291         default:
00292                 if (rbits >> (AR_IEEE32_ROUND_BITS - 1) &&
00293                     (rbits & MASKR (AR_IEEE32_ROUND_BITS - 1) ||
00294                      fix->part4 & 1))
00295                         INC64 (*fix);
00296                 break;
00297         }
00298 
00299         if (fix->part1&0x8000)
00300                 res |= (AR_STAT_OVERFLOW|AR_STAT_SEMIVALID);
00301 
00302         if (neg) {
00303                 NOT64 (*fix);
00304                 INC64 (*fix);
00305         }
00306 
00307         if (!(fix->part1 | fix->part2 | fix->part3 | fix->part4))
00308                 res |= AR_STAT_ZERO;
00309         else if (neg)
00310                 res |= AR_STAT_NEGATIVE;
00311 
00312         return res;
00313 }
00314 
00315 
00316 /* IEEE 64 bit -> integer */
00317 int
00318 ar_ifix64 (AR_INT_64 *fix,
00319            const AR_IEEE_64 *flt,
00320            int bitsize,
00321            int roundmode) {
00322 
00323         int res = AR_STAT_OK, neg;
00324         int shift = AR_IEEE64_EXPO_BIAS + AR_IEEE64_COEFF_BITS - flt->expo;
00325         unsigned long rbits = 0;
00326         AR_IEEE_64 a = *flt;
00327 
00328         if (a.expo == 0 && !IS_IEEE64_NZ_COEFF(&a)) {
00329                 ZERO64 (*fix);
00330                 return AR_STAT_ZERO;
00331         }
00332         if (shift > AR_IEEE64_COEFF_BITS + AR_IEEE64_ROUND_BITS)
00333                 shift = AR_IEEE64_COEFF_BITS + AR_IEEE64_ROUND_BITS;
00334 
00335         if (shift < AR_IEEE64_COEFF_BITS + 1 - bitsize)
00336                 res |= AR_STAT_OVERFLOW;
00337 
00338         neg = a.sign;
00339         a.sign = 0;
00340         a.expo = !!a.expo;
00341         IEEE64TOINT64 (*fix, a);
00342 
00343         for ( ; shift < 0; shift++)
00344                 SHLEFT64 (*fix);
00345         for ( ; shift > 0; shift--) {
00346                 rbits = rbits & 1 | (rbits >> 1) |
00347                         ((fix->part4 & 1) << (AR_IEEE64_ROUND_BITS - 1));
00348                 SHRIGHT64 (*fix);
00349         }
00350         if (rbits)
00351                 res |= AR_STAT_INEXACT;
00352 
00353         switch (roundmode) {
00354         case AR_ROUND_PLUS_INFINITY:
00355                 if (!a.sign && rbits)
00356                         INC64 (*fix);
00357                 break;
00358         case AR_ROUND_MINUS_INFINITY:
00359                 if (a.sign && rbits)
00360                         DEC64 (*fix);
00361                 break;
00362         case AR_ROUND_ZERO:
00363                 break;
00364         default:
00365                 if (rbits >> (AR_IEEE64_ROUND_BITS - 1) &&
00366                     (rbits & MASKR (AR_IEEE64_ROUND_BITS - 1) ||
00367                      fix->part4 & 1))
00368                         INC64 (*fix);
00369                 break;
00370         }
00371 
00372         if (fix->part1&0x8000)
00373                 res |= (AR_STAT_OVERFLOW|AR_STAT_SEMIVALID);
00374 
00375         if (neg) {
00376                 NOT64 (*fix);
00377                 INC64 (*fix);
00378         }
00379 
00380         if (!(fix->part1 | fix->part2 | fix->part3 | fix->part4))
00381                 res |= AR_STAT_ZERO;
00382         else if (neg)
00383                 res |= AR_STAT_NEGATIVE;
00384 
00385         return res;
00386 }
00387 
00388 
00389 /* IEEE 128 bit -> integer */
00390 int
00391 ar_ifix128 (AR_INT_64 *fix,
00392             const AR_IEEE_128 *flt,
00393             int bitsize,
00394             int roundmode)
00395 {
00396         int res = AR_STAT_OK, neg;
00397         int shift = AR_IEEE128_EXPO_BIAS + AR_IEEE128_COEFF_BITS-64+15 - flt->expo;
00398         unsigned long rbits = 0;
00399         AR_IEEE_128 a = *flt;
00400 
00401         /*
00402          * Use native arithmetic for MIPS.
00403          */
00404         if (HOST_IS_MIPS) {
00405                 AR_TYPE ty = AR_Int_64_S;
00406                 long double ld;
00407 
00408                 ld = *(long double *) flt;
00409                 *(long long *) fix = ld;
00410                 return AR_status ((AR_DATA *) fix, &ty);
00411         }
00412 
00413         if (a.expo == 0 && !IS_IEEE128_NZ_COEFF(&a)) {
00414                 ZERO64 (*fix);
00415                 return AR_STAT_ZERO;
00416         }
00417         if (shift > AR_IEEE128_COEFF_BITS-64+15 + AR_IEEE128_ROUND_BITS)
00418                 shift = AR_IEEE128_COEFF_BITS-64+15 + AR_IEEE128_ROUND_BITS;
00419 
00420         if (shift <= 0) {
00421                 if(shift == 0)
00422                         res |= AR_STAT_SEMIVALID;
00423                 res |= AR_STAT_OVERFLOW;
00424                 shift = 0;
00425         }
00426 
00427         neg = a.sign;
00428         fix->part1 = ((!!a.expo)<<15) | (a.coeff0>>1);
00429         fix->part2 =   (a.coeff0<<15) | (a.coeff1>>1);
00430         fix->part3 =   (a.coeff1<<15) | (a.coeff2>>1);
00431         fix->part4 =   (a.coeff2<<15) | (a.coeff3>>1);
00432 
00433         for ( ; shift > 0; shift--) {
00434                 rbits = rbits & 1 | (rbits >> 1) |
00435                         ((fix->part4 & 1) << (AR_IEEE128_ROUND_BITS - 1));
00436                 SHRIGHT64 (*fix);
00437         }
00438         if (rbits)
00439                 res |= AR_STAT_INEXACT;
00440 
00441         switch (roundmode) {
00442         case AR_ROUND_PLUS_INFINITY:
00443                 if (!a.sign && rbits)
00444                         INC64 (*fix);
00445                 break;
00446         case AR_ROUND_MINUS_INFINITY:
00447                 if (a.sign && rbits)
00448                         DEC64 (*fix);
00449                 break;
00450         case AR_ROUND_ZERO:
00451                 break;
00452         default:
00453                 if (rbits >> (AR_IEEE128_ROUND_BITS - 1) &&
00454                     (rbits & MASKR (AR_IEEE128_ROUND_BITS - 1) ||
00455                      fix->part4 & 1))
00456                         INC64 (*fix);
00457                 break;
00458         }
00459 
00460         if (neg) {
00461                 NOT64 (*fix);
00462                 INC64 (*fix);
00463         }
00464 
00465         if (!(fix->part1 | fix->part2 | fix->part3 | fix->part4))
00466                 res |= AR_STAT_ZERO;
00467         else if (neg)
00468                 res |= AR_STAT_NEGATIVE;
00469 
00470         return res;
00471 }
00472 
00473 
00474 /* integer -> IEEE 32 bit */
/* integer -> IEEE 32 bit */
/*
 * ar_iflt32 - convert a 64-bit integer to IEEE single precision using
 * the given rounding mode.  The heavy lifting (normalize + round) is
 * done by ar_i32norm; this routine just positions the coefficient and
 * collects the bits that will not fit.
 *
 *   flt         - out: IEEE 32-bit result
 *   fix         - in:  64-bit integer operand
 *   is_unsigned - nonzero to treat 'fix' as unsigned
 *   roundmode   - AR_ROUND_* rounding mode, passed to ar_i32norm
 *
 * Returns the AR_STAT_* mask produced by ar_i32norm.
 */
int
ar_iflt32 (AR_IEEE_32 *flt,
           const AR_INT_64 *fix,
           int is_unsigned,
           int roundmode) {

        int neg = 0;
        unsigned long lbits, rbits;
        AR_INT_64 val;

        COPY64 (val, *fix);
        if (SIGNBIT (val) && !is_unsigned) {
                NEG64 (val);    /* work with the magnitude */
                neg = 1;
        }

        /* Shift right until the magnitude fits in the low 32 bits,
         * accumulating shifted-out bits (sticky in bit 0). */
        rbits = 0;
        while (val.part1 | val.part2) {
                rbits = rbits & 1 | (rbits >> 1) |
                        ((val.part4 & 1) << (AR_IEEE32_ROUND_BITS - 1));
                SHRIGHT64 (val);
        }

        INT64TOIEEE32 (*flt, val);
        /* Coefficient bits that landed under the sign/exponent field;
         * ar_i32norm folds them back in during normalization. */
        lbits = val.part3 >> (16 - (AR_IEEE32_EXPO_BITS + 1));
        flt->sign = neg;

        return ar_i32norm (AR_IEEE32_EXPO_BIAS + AR_IEEE32_COEFF_BITS,
                           lbits,
                           rbits,
                           flt,
                           roundmode);
}
00508 
00509 
00510 /* integer -> IEEE 64 bit */
/* integer -> IEEE 64 bit */
/*
 * ar_iflt64 - convert a 64-bit integer to IEEE double precision using
 * the given rounding mode.  No bits are shifted out here (the 64-bit
 * value fits under the double's coefficient plus lbits), so rbits is
 * passed to ar_i64norm as 0.
 *
 *   flt         - out: IEEE 64-bit result
 *   fix         - in:  64-bit integer operand
 *   is_unsigned - nonzero to treat 'fix' as unsigned
 *   roundmode   - AR_ROUND_* rounding mode, passed to ar_i64norm
 *
 * Returns the AR_STAT_* mask produced by ar_i64norm.
 */
int
ar_iflt64 (AR_IEEE_64 *flt,
           const AR_INT_64 *fix,
           int is_unsigned,
           int roundmode) {

        int neg = 0;
        unsigned long lbits;
        AR_INT_64 val;

        if (SIGNBIT (*fix) && !is_unsigned) {
                COPY64 (val, *fix);
                NEG64 (val);    /* work with the magnitude */
                fix = &val;
                neg = 1;
        }

        INT64TOIEEE64 (*flt, *fix);
        /* Magnitude bits that landed under the sign/exponent field;
         * ar_i64norm folds them back in during normalization. */
        lbits = fix->part1 >> (16 - (AR_IEEE64_EXPO_BITS + 1));
        flt->sign = neg;

        return ar_i64norm (AR_IEEE64_EXPO_BIAS + AR_IEEE64_COEFF_BITS,
                           lbits,
                           0, /* rbits */
                           flt,
                           roundmode);
}
00538 
00539 
00540 /* integer -> IEEE 128 bit */
/* integer -> IEEE 128 bit */
/*
 * ar_iflt128 - convert a 64-bit integer to IEEE quad precision using
 * the given rounding mode.  On MIPS hosts the conversion is delegated
 * to native long double arithmetic; otherwise ar_i128norm performs
 * the normalization and rounding.
 *
 *   flt         - out: IEEE 128-bit result
 *   fix         - in:  64-bit integer operand
 *   is_unsigned - nonzero to treat 'fix' as unsigned
 *   roundmode   - AR_ROUND_* rounding mode, passed to ar_i128norm
 *
 * Returns an AR_STAT_* mask.
 */
int
ar_iflt128 (AR_IEEE_128 *flt,
           const AR_INT_64 *fix,
           int is_unsigned,
           int roundmode) {

        int neg = 0;
        unsigned long lbits;
        AR_INT_64 val;

        /*
         * Use native arithmetic for MIPS.
         * NOTE(review): '*(long long *) fix' type-puns through an
         * incompatible pointer type (strict-aliasing hazard); kept
         * as-is since this is a host-specific fast path.
         */
        if (HOST_IS_MIPS) {
                long double ld;
                long long li;
                AR_TYPE ty = AR_Float_IEEE_NR_128;

                li = *(long long *) fix;
                ld = li;
                memcpy(flt,&ld,sizeof(long double));
                return AR_status ((AR_DATA *) flt, &ty);
        }

        if (SIGNBIT (*fix) && !is_unsigned) {
                COPY64 (val, *fix);
                NEG64 (val);    /* work with the magnitude */
                fix = &val;
                neg = 1;
        }

        /* Clear the low coefficient words INT64TOIEEE128 leaves alone. */
        flt->coeff3 = flt->coeff4 = flt->coeff5 = flt->coeff6 = 0;
        INT64TOIEEE128 (*flt, *fix);
        /* Magnitude bits that landed under the sign/exponent field. */
        lbits = fix->part1 >> (16 - (AR_IEEE128_EXPO_BITS + 1));
        flt->sign = neg;

        return ar_i128norm (AR_IEEE128_EXPO_BIAS + AR_IEEE128_COEFF_BITS-64,
                           lbits,
                           0, /* rbits */
                           flt,
                           roundmode);
}
00583 
00584 
00585 /* IEEE 64 -> Cray single */
/* IEEE 64 -> Cray single */
/*
 * ar_itoc64 - convert an IEEE double-precision float to Cray
 * single-precision format.
 *
 *   to        - out: Cray 64-bit result
 *   from      - in:  IEEE 64-bit operand
 *   roundmode - AR_ROUND_* mode used while extracting the coefficient
 *
 * Returns an AR_STAT_* mask.  IEEE Inf/NaN (expo out of range) maps
 * to the out-of-range Cray pattern and reports AR_STAT_OVERFLOW.
 */
int
ar_itoc64 (AR_CRAY_64 *to,
           const AR_IEEE_64 *from,
           int roundmode) {

        int toexpo;
        AR_INT_64 coeff;
        AR_IEEE_64 v, fact;
        AR_CRAY_64 zero;

        if (from->expo > AR_IEEE64_MAX_EXPO) {
                to->expo = AR_CRAY_MAX_EXPO + 1;
                to->coeff0 = 1 << (AR_CRAY_C0_BITS - 1);
                to->coeff1 = to->coeff2 = 0;
                return AR_STAT_OVERFLOW;
        }

        /* Extract the coefficient as an unsigned integer, rounded to
         * Cray precision.
         */
        v = *from;
        v.sign = 0;
        toexpo = from->expo + AR_CRAY_EXPO_BIAS - AR_IEEE64_EXPO_BIAS;
        if (!v.expo) {
                /* Denormalized value; try to normalize it */
                ZEROIEEE64 (fact);
                /* Multiply by 2^COEFF_BITS to bring it into range. */
                fact.expo = AR_IEEE64_EXPO_BIAS + AR_IEEE64_COEFF_BITS;
                (void) ar_ifmul64 (&v, &v, &fact, roundmode);
                if (!v.expo) {
                        ZEROCRAY64 (*to);
                        return AR_STAT_ZERO;
                }
                toexpo = v.expo + 1 + AR_CRAY_EXPO_BIAS - AR_IEEE64_EXPO_BIAS -
                                AR_IEEE64_COEFF_BITS;
        }
        /* Force the value's exponent so ar_ifix64 yields the coefficient
         * as an integer of AR_CRAY64_COEFF_BITS significance. */
        v.expo = AR_IEEE64_EXPO_BIAS + AR_CRAY64_COEFF_BITS - 1;
        (void) ar_ifix64 (&coeff, &v, AR_IEEE64_COEFF_BITS, roundmode);
        INT64TOCRAY64 (*to, coeff);
        /* Rounding may have carried into the exponent field; renormalize. */
        if (to->expo) {
                SHRIGHTCRAY64 (*to);
                to->coeff0 |= 1 << (AR_CRAY_C0_BITS - 1);
                toexpo++;
        }
        to->sign = from->sign;
        to->expo = toexpo;

        /* Normalize and return */
        ZEROCRAY64 (zero);
        return ar_cfadd64 (to, to, &zero);
}
00636 
00637 
00638 /* IEEE 128 -> Cray double */
/* IEEE 128 -> Cray double */
/*
 * ar_itoc128 - convert an IEEE quad-precision float to Cray
 * double-precision format.
 *
 *   to        - out: Cray 128-bit result
 *   from      - in:  IEEE 128-bit operand
 *   roundmode - AR_ROUND_* mode used when normalizing denormals
 *
 * Returns an AR_STAT_* mask.  IEEE Inf/NaN and exponents beyond the
 * Cray range map to the out-of-range Cray pattern with
 * AR_STAT_OVERFLOW.
 */
int
ar_itoc128(AR_CRAY_128 *to,
           const AR_IEEE_128 *from,
           int roundmode) {

        int toexpo;
        AR_IEEE_128 v, fact;
        AR_CRAY_128 zero;

        if (from->expo > AR_IEEE128_MAX_EXPO) {
                to->expo = AR_CRAY_MAX_EXPO + 1;
                to->coeff0 = 1 << (AR_CRAY_C0_BITS - 1);
                to->coeff1 = to->coeff2 = to->coeff3 = to->coeff4 = to->coeff5 = 0;
                return AR_STAT_OVERFLOW;
        }

        v = *from;
        v.sign = 0;
        toexpo = from->expo + AR_CRAY_EXPO_BIAS - AR_IEEE128_EXPO_BIAS;
        if (!v.expo) {
                /* Denormalized value; try to normalize it */
                ZEROIEEE128 (fact);
                /* Multiply by 2^COEFF_BITS to bring it into range. */
                fact.expo = AR_IEEE128_EXPO_BIAS + AR_IEEE128_COEFF_BITS;
                (void) ar_ifmul128 (&v, &v, &fact, roundmode);
                if (!v.expo) {
                        ZEROCRAY128 (*to);
                        return AR_STAT_ZERO;
                }
                toexpo = v.expo + 2 + AR_CRAY_EXPO_BIAS - AR_IEEE128_EXPO_BIAS -
                                AR_IEEE128_COEFF_BITS;
        }

        /* Exponent may still exceed the Cray range after rebiasing. */
        if(toexpo > AR_CRAY_MAX_EXPO) {
                to->expo = AR_CRAY_MAX_EXPO + 1;
                to->coeff0 = 1 << (AR_CRAY_C0_BITS - 1);
                to->coeff1 = to->coeff2 = to->coeff3 = to->coeff4 = to->coeff5 = 0;
                return AR_STAT_OVERFLOW;
        }

        /* Copy the coefficient, then shift right once to make room
         * for the explicit leading 1 the Cray format carries. */
        to->coeff0 = v.coeff0;
        to->coeff1 = v.coeff1;
        to->coeff2 = v.coeff2;
        to->zero   = 0;
        to->coeff3 = v.coeff3;
        to->coeff4 = v.coeff4;
        to->coeff5 = v.coeff5;
        SHRIGHTCRAY128(*to);
        to->coeff0 |= 1 << (AR_CRAY_C0_BITS - 1);

        to->sign = from->sign;
        to->expo = toexpo;

        /* Normalize, set status, and return */
        ZEROCRAY128 (zero);
        return ar_cfadd128(to, to, &zero);
}
00695 
00696 
00697 /* IEEE 64 -> Cray double */
/* IEEE 64 -> Cray double */
/*
 * ar_i64toc128 - convert an IEEE double-precision float to Cray
 * double-precision format, rounding to nearest.
 *
 *   to   - out: Cray 128-bit result
 *   from - in:  IEEE 64-bit operand
 *
 * Returns an AR_STAT_* mask.  IEEE Inf/NaN maps to the out-of-range
 * Cray pattern with AR_STAT_OVERFLOW.
 */
int
ar_i64toc128 (AR_CRAY_128 *to, const AR_IEEE_64 *from) {

        int res, toexpo;
        AR_INT_64 coeff;
        AR_IEEE_64 v, fact;
        AR_CRAY_64 sing;
        AR_CRAY_128 zero;

        if (from->expo > AR_IEEE64_MAX_EXPO) {
                to->expo = AR_CRAY_MAX_EXPO + 1;
                to->coeff0 = 1 << (AR_CRAY_C0_BITS - 1);
                to->coeff1 = to->coeff2 = 0;
                return AR_STAT_OVERFLOW;
        }

        /* Extract the coefficient as an unsigned integer */
        v = *from;
        v.sign = 0;
        toexpo = from->expo + AR_CRAY_EXPO_BIAS - AR_IEEE64_EXPO_BIAS;
        if (!v.expo) {
                /* Denormalized value; try to normalize it */
                ZEROIEEE64 (fact);
                fact.expo = AR_IEEE64_EXPO_BIAS + AR_IEEE64_COEFF_BITS;
                (void) ar_ifmul64 (&v, &v, &fact, AR_ROUND_NEAREST);
                if (!v.expo) {
                        ZEROCRAY128 (*to);
                        return AR_STAT_ZERO;
                }
                toexpo = v.expo + 1 + AR_CRAY_EXPO_BIAS - AR_IEEE64_EXPO_BIAS -
                                AR_IEEE64_COEFF_BITS;
        }
        /* Fix the exponent so ar_ifix64 yields the full coefficient
         * (including the implicit leading 1) as an integer. */
        v.expo = AR_IEEE64_EXPO_BIAS + AR_IEEE64_COEFF_BITS;
        (void) ar_ifix64 (&coeff, &v, AR_IEEE64_COEFF_BITS, AR_ROUND_NEAREST);
        INT64TOCRAY64 (sing, coeff);
        CRAY64TO128 (*to, sing);
        toexpo -= AR_IEEE64_COEFF_BITS + 1 - AR_CRAY64_COEFF_BITS;
        /* Coefficient bits spilled into the exponent field; fold them
         * back down into coeff0 one bit at a time.  (Bitfield stores
         * truncate, so only the low expo bit lands in coeff0 each
         * pass — presumably intentional; verify against the format.) */
        while (to->expo) {
                SHRIGHTCRAY128 (*to);
                to->coeff0 |= to->expo << (AR_CRAY_C0_BITS - 1);
                to->expo >>= 1;
                toexpo++;
        }
        to->sign = from->sign;
        to->expo = toexpo;

        /* Normalize and return */
        ZEROCRAY128 (zero);
        return ar_cfadd128 (to, to, &zero);
}
00748 
00749 
00750 /* Cray 64 -> IEEE 64 */
/* Cray 64 -> IEEE 64 */
/*
 * ar_ctoi64 - convert a Cray single-precision float to IEEE double
 * precision.  The Cray coefficient fits entirely in the IEEE double
 * coefficient, so the conversion is exact; ar_i64norm handles
 * normalization (rounding mode is irrelevant here).
 *
 *   to   - out: IEEE 64-bit result
 *   from - in:  Cray 64-bit operand
 *
 * Returns an AR_STAT_* mask.  A Cray out-of-range exponent yields a
 * quiet NaN (MIPS) or the Inf/NaN exponent pattern, plus
 * AR_STAT_OVERFLOW.
 */
int
ar_ctoi64 (AR_IEEE_64 *to, const AR_CRAY_64 *from) {

        int res = AR_STAT_OK;
        AR_INT_64 coeff;
        AR_CRAY_64 v = *from;
        int expo;

        if (from->expo > AR_CRAY_MAX_EXPO) {
                /* Overflow yields a quiet NaN */
                if (HOST_IS_MIPS) {
                        QNaNIEEE64 (to);
                }
                else {
                        ZEROIEEE64 (*to);
                        /* Intentional assignment inside the condition. */
                        if (to->sign = v.sign)
                                res |= AR_STAT_NEGATIVE;
                        to->expo = AR_IEEE64_MAX_EXPO + 1;
                        to->coeff0 = 1 << (AR_IEEE64_C0_BITS - 1);
                }
                return res | AR_STAT_OVERFLOW;
        }

        v.sign = 0;
        v.expo = 0;     /* keep only the coefficient bits */
        CRAY64TOINT64 (coeff, v);
        INT64TOIEEE64 (*to, coeff);
        to->sign = from->sign;
        /* Rebias and account for the differing coefficient widths. */
        expo = from->expo + AR_IEEE64_EXPO_BIAS - AR_CRAY_EXPO_BIAS +
                            AR_IEEE64_COEFF_BITS + 1 - AR_CRAY64_COEFF_BITS;
        if (expo <= 0)
                expo--;

        return ar_i64norm (expo,
                           0, /* lbits */
                           0, /* rbits */
                           to,
                           AR_ROUND_NEAREST);
}
00790 
00791 
00792 /* Cray 128 -> IEEE 128 */
/* Cray 128 -> IEEE 128 */
/*
 * ar_ctoi128 - convert a Cray double-precision float to IEEE quad
 * precision.  The Cray coefficient fits in the IEEE quad coefficient,
 * so the conversion is exact; ar_i128norm handles normalization.
 *
 *   to   - out: IEEE 128-bit result
 *   from - in:  Cray 128-bit operand
 *
 * Returns an AR_STAT_* mask.  A Cray out-of-range exponent yields a
 * quiet NaN (MIPS) or the Inf/NaN exponent pattern, plus
 * AR_STAT_OVERFLOW.
 */
int
ar_ctoi128 (AR_IEEE_128 *to, const AR_CRAY_128 *from) {

        int res = AR_STAT_OK;
        AR_CRAY_128 v = *from;
        int expo;

        if (from->expo > AR_CRAY_MAX_EXPO) {
                /* Overflow yields a quiet NaN */
                if (HOST_IS_MIPS) {
                        QNaNIEEE128 (to);
                }
                else {
                        ZEROIEEE128 (*to);
                        /* Intentional assignment inside the condition. */
                        if (to->sign = v.sign)
                                res |= AR_STAT_NEGATIVE;
                        to->expo = AR_IEEE128_MAX_EXPO + 1;
                        to->coeff0 = 1 << (AR_IEEE128_C0_BITS - 1);
                }
                return res | AR_STAT_OVERFLOW;
        }

        /* Copy the coefficient straight across; ar_i128norm will
         * position it and set the final exponent. */
        to->sign = v.sign;
        to->expo = 0;
        to->coeff0 = v.coeff0;
        to->coeff1 = v.coeff1;
        to->coeff2 = v.coeff2;
        to->coeff3 = v.coeff3;
        to->coeff4 = v.coeff4;
        to->coeff5 = v.coeff5;
        to->coeff6 = 0;
        /* Rebias and account for the differing coefficient widths. */
        expo = v.expo + AR_IEEE128_EXPO_BIAS - AR_CRAY_EXPO_BIAS +
                            AR_IEEE128_COEFF_BITS + 1 - AR_CRAY128_COEFF_BITS;
        if (expo <= 0)
                expo--;

        return ar_i128norm (expo,
                           0, /* lbits */
                           0, /* rbits */
                           to,
                           AR_ROUND_NEAREST);
}
00835 
00836 
00837 /* Cray 128 -> IEEE 64 */
/* Cray 128 -> IEEE 64 */
/*
 * ar_c128toi64 - convert a Cray double-precision float to IEEE double
 * precision, rounding to nearest.  The Cray coefficient is wider than
 * the IEEE double coefficient, so guard/round/sticky bits are derived
 * from the discarded low-order bits and passed to ar_i64norm.
 *
 *   to   - out: IEEE 64-bit result
 *   from - in:  Cray 128-bit operand
 *
 * Returns an AR_STAT_* mask.  A Cray out-of-range exponent yields a
 * quiet NaN (MIPS) or the Inf/NaN exponent pattern, plus
 * AR_STAT_OVERFLOW.
 */
int
ar_c128toi64 (AR_IEEE_64 *to, const AR_CRAY_128 *from) {

        int res = AR_STAT_OK;
        AR_INT_64 coeff;
        AR_CRAY_128 v = *from;
        AR_CRAY_64 v64;
        int i;
        int rbits;
        int expo;

        if (from->expo > AR_CRAY_MAX_EXPO) {
                /* Overflow yields a quiet NaN */
                if (HOST_IS_MIPS) {
                        QNaNIEEE64 (to);
                }
                else {
                        ZEROIEEE64 (*to);
                        /* Intentional assignment inside the condition. */
                        if (to->sign = v.sign)
                                res |= AR_STAT_NEGATIVE;
                        to->expo = AR_IEEE64_MAX_EXPO + 1;
                        to->coeff0 = 1 << (AR_IEEE64_C0_BITS - 1);
                }
                return res | AR_STAT_OVERFLOW;
        }

        /* Load the upper Cray coefficient into the IEEE layout. */
        v.sign = 0;
        v.expo = 0;
        CRAY128TO64 (v64, v);
        CRAY64TOINT64 (coeff, v64);
        INT64TOIEEE64 (*to, coeff);

        /* Move upper bits of second part into low bits */
        for (i = 0; i < AR_IEEE64_COEFF_BITS - AR_CRAY64_COEFF_BITS; i++)
                SHLEFTIEEE64 (*to);
        to->coeff3 |= v.coeff3 >> (AR_CRAY_C3_BITS -
                                   (AR_IEEE64_COEFF_BITS -
                                    AR_CRAY64_COEFF_BITS));

        /* Compute guard, round, and sticky bits */
        rbits = (v.coeff3 >> (AR_CRAY_C3_BITS - AR_IEEE64_ROUND_BITS -
                              (AR_IEEE64_COEFF_BITS - AR_CRAY64_COEFF_BITS))) &
                MASKR (AR_IEEE64_ROUND_BITS);
        /* Sticky: OR of everything below the round bits. */
        rbits |= !!(v.coeff3 & MASKR (AR_CRAY_C3_BITS - AR_IEEE64_ROUND_BITS -
                                      (AR_IEEE64_COEFF_BITS -
                                       AR_CRAY64_COEFF_BITS) - 1) |
                    v.coeff4 | v.coeff5);

        to->sign = from->sign;
        /* Rebias the exponent for the IEEE double format. */
        expo = from->expo + AR_IEEE64_EXPO_BIAS - AR_CRAY_EXPO_BIAS + 1;
        if (expo <= 0)
                expo--;

        return ar_i64norm (expo,
                           0, /* lbits */
                           rbits,
                           to,
                           AR_ROUND_NEAREST);
}
00897 
00898 
00899 /* Cray double -> single */
00900 int
00901 ar_c128to64 (AR_CRAY_64 *s, const AR_CRAY_128 *d) {
00902 
00903         int res = AR_STAT_OK;
00904 
00905         if (!(d->sign | d->expo | d->coeff0 | d->coeff1 | d->coeff2))
00906                 res |= AR_STAT_ZERO;
00907         else if (d->sign)
00908                 res |= AR_STAT_NEGATIVE;
00909 
00910         /* Truncated value is returned since that is what all compilers do */
00911         CRAY128TO64 (*s, *d);
00912 
00913         return res;
00914 }
00915 
00916 /* Cray single -> double */
00917 int
00918 ar_c64to128 (AR_CRAY_128 *d, const AR_CRAY_64 *s) {
00919 
00920         int res = AR_STAT_OK;
00921 
00922         if (!(s->sign | s->expo | s->coeff0 | s->coeff1 | s->coeff2))
00923                 res |= AR_STAT_ZERO;
00924         else if (s->sign)
00925                 res |= AR_STAT_NEGATIVE;
00926 
00927         CRAY64TO128(*d, *s);
00928 
00929         return res;
00930 }
00931 
00932 
00933 /* IEEE 64 -> IEEE 32 */
00934 int
00935 ar_i64to32 (AR_IEEE_32 *s, const AR_IEEE_64 *d, const int roundmode) {
00936 
00937         int res = AR_STAT_OK;
00938         int expo;
00939         unsigned long lbits, rbits;
00940 
00941 #       if AR_IEEE32_C0_BITS < AR_IEEE64_C0_BITS
00942 #               error ar_i64to32 has coefficient shifts miscoded.
00943 #       else
00944 #               define COEFF_BIT_OFF  (AR_IEEE32_C0_BITS - AR_IEEE64_C0_BITS)
00945 #       endif
00946 
00947         if (d->expo > AR_IEEE64_MAX_EXPO) {
00948                 if (IS_IEEE64_NZ_COEFF(d)) {
00949                         /* WARNING: the following code makes some assumptions about
00950                          * bit field sizes!
00951                          *
00952                          * Incoming quantity is a NaN; return a NaN with the
00953                          * same high AR_IEEE32_COEFF_BITS bits.  The result
00954                          * must have at least one non-zero coefficient bit.
00955                          */
00956                         ZEROIEEE32 (*s);
00957                         s->sign    = d->sign;
00958                         s->expo    = AR_IEEE32_MAX_EXPO + 1;
00959                         s->coeff1  = (d->coeff2 >> (AR_IEEE64_C2_BITS - COEFF_BIT_OFF)) |
00960                                                  (d->coeff1 << COEFF_BIT_OFF);
00961                         s->coeff0  = (d->coeff1 >> (AR_IEEE64_C1_BITS - COEFF_BIT_OFF)) |
00962                                                  (d->coeff0 << COEFF_BIT_OFF);
00963                         if (!IS_IEEE32_NZ_COEFF(s)) {
00964                                 s->coeff1 = 1;
00965                         }
00966                         return AR_STAT_UNDEFINED;
00967                 } else {
00968                         /* It's +/-Inf */
00969                         ZEROIEEE32 (*s);
00970                         s->expo = AR_IEEE32_MAX_EXPO + 1;
00971                         if (s->sign = d->sign)
00972                                 res |= AR_STAT_NEGATIVE;
00973                         return res | AR_STAT_OVERFLOW;
00974                 }
00975         }
00976 
00977         if (d->sign)
00978                 res |= AR_STAT_NEGATIVE;
00979 
00980         /* Incoming denorm must underflow to zero. */
00981         if (!d->expo) {
00982                 s->sign = d->sign;
00983                 s->zero = s->expo = s->coeff0 = s->coeff1 = 0;
00984                 res |= AR_STAT_ZERO;
00985                 if (IS_IEEE64_NZ_COEFF(d))
00986                     if(ar_state_register.ar_denorms_trap)
00987                                 res |= AR_STAT_UNDERFLOW;
00988                     else
00989                                 res |= AR_STAT_UNDEFINED;
00990                 return res;
00991         }
00992 
00993         lbits = 1;
00994         expo = d->expo - AR_IEEE64_EXPO_BIAS + AR_IEEE32_EXPO_BIAS;
00995         if (expo <= 0)
00996                 expo--;
00997 
00998         /* WARNING: the following code makes some assumptions about
00999          * bit field sizes!
01000          *
01001          * Compress rightmost 29 bits of incoming coefficient into
01002          * a 3-bit guard/round/sticky set of rounding bits.
01003          */
01004         rbits = ((d->coeff2 >> 10) & 07) |                      /* G and R */
01005                 !!(d->coeff2 & MASKR (10) | d->coeff3);         /* sticky  */
01006 
01007         /* Move upper 23 bits of incoming coefficient into place */
01008         s->coeff1 = (d->coeff2 >> (AR_IEEE64_C2_BITS - COEFF_BIT_OFF)) |
01009                                 (d->coeff1 << COEFF_BIT_OFF);
01010         s->coeff0 = (d->coeff1 >> (AR_IEEE64_C2_BITS - COEFF_BIT_OFF)) |
01011                                 (d->coeff0 << COEFF_BIT_OFF);
01012 
01013         s->sign = d->sign;
01014         s->zero = 0;
01015 
01016         return ar_i32norm (expo, lbits, rbits, s, roundmode);
01017 
01018 #       undef COEFF_BIT_OFF
01019 }
01020 
01021 
01022 /* IEEE 32 -> IEEE 64 */
01023 int
01024 ar_i32to64 (AR_IEEE_64 *d, const AR_IEEE_32 *s) {
01025 
01026         int expo;
01027 
01028 #       if AR_IEEE32_C0_BITS < AR_IEEE64_C0_BITS
01029 #               error ar_i32to64 has coefficient shifts miscoded.
01030 #       else
01031 #               define COEFF_BIT_OFF  (AR_IEEE32_C0_BITS - AR_IEEE64_C0_BITS)
01032 #       endif
01033 
01034         if (s->expo > AR_IEEE32_MAX_EXPO) {
01035                 if (IS_IEEE32_NZ_COEFF(s)) {
01036                         /* WARNING: the following code makes some assumptions about
01037                          * bit field sizes!
01038                          *
01039                          * Incoming quantity is a NaN; return a NaN with the
01040                          * same high AR_IEEE32_COEFF_BITS bits.  The result
01041                          * must have at least one non-zero coefficient bit.
01042                          */
01043                         ZEROIEEE64 (*d);
01044                         d->sign   = s->sign;
01045                         d->expo   = AR_IEEE64_MAX_EXPO + 1;
01046                         d->coeff0 = s->coeff0 >> COEFF_BIT_OFF;
01047                         d->coeff1 = (s->coeff0 << (AR_IEEE64_C1_BITS - COEFF_BIT_OFF)) |
01048                                                 (s->coeff1 >> COEFF_BIT_OFF);
01049                         d->coeff2 = s->coeff1 << (AR_IEEE64_C2_BITS - COEFF_BIT_OFF);
01050                         d->coeff3 = 0;
01051                         if (!IS_IEEE64_NZ_COEFF(d)) {
01052                                 d->coeff3 = 1;
01053                         }
01054                         return AR_STAT_UNDEFINED;
01055                 } else {
01056                         /* It's +/-Inf */
01057                         ZEROIEEE64 (*d);
01058                         d->expo = AR_IEEE64_MAX_EXPO + 1;
01059                         if (d->sign = s->sign)
01060                                 return AR_STAT_OVERFLOW | AR_STAT_NEGATIVE;
01061                         return AR_STAT_OVERFLOW;
01062                 }
01063         }
01064 
01065         d->sign = s->sign;
01066         if (s->expo)
01067                 expo = s->expo - AR_IEEE32_EXPO_BIAS + AR_IEEE64_EXPO_BIAS;
01068         else
01069                 expo = 0;
01070 
01071         /* WARNING: the following code makes some assumptions about
01072          * bit field sizes!
01073          *
01074          * Copy incoming coefficient into the upper 23 bits of the result.
01075          */
01076         d->coeff0 = s->coeff0 >> COEFF_BIT_OFF;
01077         d->coeff1 = (s->coeff0 << (AR_IEEE64_C1_BITS - COEFF_BIT_OFF)) |
01078                                 (s->coeff1 >> COEFF_BIT_OFF);
01079         d->coeff2 = s->coeff1 << (AR_IEEE64_C2_BITS - COEFF_BIT_OFF);
01080         d->coeff3 = 0;
01081 
01082         return ar_i64norm (expo,
01083                            !!s->expo /* lbits */,
01084                            0 /* rbits */,
01085                            d,
01086                            AR_ROUND_NEAREST /* ignored */);
01087 
01088 #       undef COEFF_BIT_OFF
01089 }
01090 
01091 
/* IEEE 128 -> IEEE 64 */
/*
 * Narrow the IEEE 128-bit value *q to the IEEE 64-bit value *d,
 * rounding per roundmode.  Returns an AR_STAT_* status mask.
 * NaNs map to NaNs (preserving the high coefficient bits), infinities
 * map to infinities with AR_STAT_OVERFLOW, and incoming denorms
 * underflow to zero.
 */
int
ar_i128to64 (AR_IEEE_64 *d, const AR_IEEE_128 *q, const int roundmode) {

        int res = AR_STAT_OK;
        int expo;
        unsigned long lbits, rbits;

        /*
         * Use native arithmetic for MIPS.
         * (Type-puns the project structs to double/long double; assumes
         * the host long double matches the 128-bit layout — pre-existing
         * platform-specific behavior.)
         */
        if (HOST_IS_MIPS) {
                AR_TYPE ty = AR_Float_IEEE_NR_64;

                *(double *) d = *(long double *) q;
                return AR_status ((AR_DATA *) d, &ty);
        }

#       if AR_IEEE128_C0_BITS < AR_IEEE64_C0_BITS
#               error ar_i128to64 has coefficient shifts miscoded.
#       else
#               define COEFF_BIT_OFF  (AR_IEEE128_C0_BITS - AR_IEEE64_C0_BITS)
#       endif

        if (q->expo > AR_IEEE128_MAX_EXPO) {
                if (IS_IEEE128_NZ_COEFF(q)) {
                        /* WARNING: the following code makes some assumptions about
                         * bit field sizes!
                         *
                         * Incoming quantity is a NaN; return a NaN with the
                         * same high AR_IEEE64_COEFF_BITS bits.  The result
                         * must have at least one non-zero coefficient bit.
                         */
                        ZEROIEEE64 (*d);
                        d->sign    = q->sign;
                        d->expo    = AR_IEEE64_MAX_EXPO + 1;
                        d->coeff3 = (q->coeff3 >> COEFF_BIT_OFF) |
                                                (q->coeff2 << AR_IEEE64_C0_BITS);
                        d->coeff2 = (q->coeff2 >> COEFF_BIT_OFF) |
                                                (q->coeff1 << AR_IEEE64_C0_BITS);
                        d->coeff1 = (q->coeff1 >> COEFF_BIT_OFF) |
                                                (q->coeff0 << AR_IEEE64_C0_BITS);
                        d->coeff0 = (q->coeff0 >> COEFF_BIT_OFF);
                        if (!IS_IEEE64_NZ_COEFF(d)) {
                                d->coeff3 = 1;
                        }
                        return AR_STAT_UNDEFINED;
                } else {
                        /* It's +/-Inf */
                        ZEROIEEE64 (*d);
                        d->expo = AR_IEEE64_MAX_EXPO + 1;
                        if (d->sign = q->sign) /* assignment intended */
                                res |= AR_STAT_NEGATIVE;
                        return res | AR_STAT_OVERFLOW;
                }
        }

        if (q->sign)
                res |= AR_STAT_NEGATIVE;

        /* Incoming denorm must underflow to zero. */
        if (!q->expo) {
                d->sign = q->sign;
                d->expo = d->coeff0 = d->coeff1 = d->coeff2 = d->coeff3 = 0;
                res |= AR_STAT_ZERO;
                /* Dangling else below binds to the inner if — intended. */
                if (IS_IEEE128_NZ_COEFF(q))
                    if(ar_state_register.ar_denorms_trap)
                                res |= AR_STAT_UNDERFLOW;
                    else
                                res |= AR_STAT_UNDEFINED;
                return res;
        }

        lbits = 1;      /* normalized input: implicit leading one bit */
        expo = q->expo - AR_IEEE128_EXPO_BIAS + AR_IEEE64_EXPO_BIAS;
        if (expo <= 0)
                expo--;

        /* WARNING: the following code makes some assumptions about
         * bit field sizes!
         *
         * Compress rightmost bits of incoming coefficient into
         * a 3-bit guard/round/sticky set of rounding bits.
         * (& binds tighter than | in the sticky expression — intended.)
         */
        rbits = ((q->coeff3 >> 9) & 07) |                       /* G and R */
                !!(q->coeff3 & MASKR (9) |
                   q->coeff4 | q->coeff5 | q->coeff6);          /* sticky */

        /* Move upper bits of incoming coefficient into place */
        d->coeff3 = (q->coeff3 >> COEFF_BIT_OFF) |
                                (q->coeff2 << AR_IEEE64_C0_BITS);
        d->coeff2 = (q->coeff2 >> COEFF_BIT_OFF) |
                                (q->coeff1 << AR_IEEE64_C0_BITS);
        d->coeff1 = (q->coeff1 >> COEFF_BIT_OFF) |
                                (q->coeff0 << AR_IEEE64_C0_BITS);
        d->coeff0 = (q->coeff0 >> COEFF_BIT_OFF);

        d->sign = q->sign;

        /* Round and renormalize into final IEEE 64 form. */
        return ar_i64norm (expo, lbits, rbits, d, roundmode);

#       undef COEFF_BIT_OFF
}
01195 
01196 
/* IEEE 64 -> IEEE 128 */
/*
 * Widen the IEEE 64-bit value *d to the IEEE 128-bit value *q.
 * Returns an AR_STAT_* status mask.  NaNs map to NaNs (preserving the
 * high coefficient bits) and infinities map to infinities with
 * AR_STAT_OVERFLOW.
 */
int
ar_i64to128 (AR_IEEE_128 *q, const AR_IEEE_64 *d) {

        int expo;

        /*
         * Use native arithmetic for MIPS.
         * (Type-puns the project structs to double/long double; assumes
         * the host long double matches the 128-bit layout — pre-existing
         * platform-specific behavior.)
         */
        if (HOST_IS_MIPS) {
                AR_TYPE ty = AR_Float_IEEE_NR_128;

                *(long double *) q = *(double *) d;
                return AR_status ((AR_DATA *) q, &ty);
        }

#       if AR_IEEE128_C0_BITS < AR_IEEE64_C0_BITS
#               error ar_i64to128 has coefficient shifts miscoded.
#       else
#               define COEFF_BIT_OFF  (AR_IEEE128_C0_BITS - AR_IEEE64_C0_BITS)
#       endif

        if (d->expo > AR_IEEE64_MAX_EXPO) {
                if (IS_IEEE64_NZ_COEFF(d)) {
                        /* WARNING: the following code makes some assumptions about
                         * bit field sizes!
                         *
                         * Incoming quantity is a NaN; return a NaN with the
                         * same high AR_IEEE64_COEFF_BITS bits.  The result
                         * must have at least one non-zero coefficient bit.
                         */
                        ZEROIEEE128 (*q);
                        q->sign   = d->sign;
                        q->expo   = AR_IEEE128_MAX_EXPO + 1;
                        q->coeff0 = (d->coeff0 << COEFF_BIT_OFF) |
                                                (d->coeff1 >> AR_IEEE64_C0_BITS);
                        q->coeff1 = (d->coeff1 << COEFF_BIT_OFF) |
                                                (d->coeff2 >> AR_IEEE64_C0_BITS);
                        q->coeff2 = (d->coeff2 << COEFF_BIT_OFF) |
                                                (d->coeff3 >> AR_IEEE64_C0_BITS);
                        q->coeff3 = (d->coeff3 << COEFF_BIT_OFF);
                        q->coeff4 = 0;
                        q->coeff5 = 0;
                        q->coeff6 = 0;
                        if (!IS_IEEE128_NZ_COEFF(q)) {
                                q->coeff6 = 1;
                        }
                        return AR_STAT_UNDEFINED;
                } else {
                        /* It's +/-Inf */
                        ZEROIEEE128 (*q);
                        q->expo = AR_IEEE128_MAX_EXPO + 1;
                        if (q->sign = d->sign) /* assignment intended */
                                return AR_STAT_OVERFLOW | AR_STAT_NEGATIVE;
                        return AR_STAT_OVERFLOW;
                }
        }

        q->sign = d->sign;
        /* Rebias the exponent; a zero exponent (zero or denorm) stays zero. */
        if (d->expo)
                expo = d->expo - AR_IEEE64_EXPO_BIAS + AR_IEEE128_EXPO_BIAS;
        else
                expo = 0;

        /* WARNING: the following code makes some assumptions about
         * bit field sizes!
         *
         * Copy incoming coefficient into the upper bits of the result.
         */
        q->coeff0 = (d->coeff0 << COEFF_BIT_OFF) |
                                (d->coeff1 >> AR_IEEE64_C0_BITS);
        q->coeff1 = (d->coeff1 << COEFF_BIT_OFF) |
                                (d->coeff2 >> AR_IEEE64_C0_BITS);
        q->coeff2 = (d->coeff2 << COEFF_BIT_OFF) |
                                (d->coeff3 >> AR_IEEE64_C0_BITS);
        q->coeff3 = (d->coeff3 << COEFF_BIT_OFF);
        q->coeff4 = 0;
        q->coeff5 = 0;
        q->coeff6 = 0;

        /* Normalize; lbits is 0 for a denormal input, 1 otherwise. */
        return ar_i128norm (expo,
                           !!d->expo /* lbits */,
                           0 /* rbits */,
                           q,
                           AR_ROUND_NEAREST /* ignored */);

#       undef COEFF_BIT_OFF
}
01285 
#ifdef __mips
#if 0 
/* NOTE(review): this whole region is disabled by the "#if 0" above and
 * is never compiled; retained for reference only.
 */
/* Conversions to/from MIPS quad to IEEE quad */
 
int
ar_m128toi128(AR_IEEE_128 *out, long double *in)
{
        int res;

        AR_IEEE_128 lo, hi;

        /* A MIPS quad is 2 doubles, so to convert to an IEEE
         * quad, just add them.
         */

        res  = ar_i64to128(&hi, &((AR_IEEE_64 *) in)[0]);
        res |= ar_i64to128(&lo, &((AR_IEEE_64 *) in)[1]);
        res |= ar_ifadd128(out, &lo, &hi, AR_ROUND_NEAREST);

        return (res);
}
 
int
ar_i128tom128(long double *out, AR_IEEE_128 *in)
{
        int res;

        AR_IEEE_64  lo64,   hi64;
        AR_IEEE_128 low128, hi128;
        long double lo1,    hi1;

        /* Convert by setting hi part of out to 
         * (double) in, low part of out to the rest.
         */
        res = ar_i128to64(&hi64, in, AR_ROUND_NEAREST);
        res |= ar_i64to128(&hi128, &hi64);
        res |= ar_ifsub128(&low128, in, &hi128, AR_ROUND_NEAREST);
        res |= ar_i128to64(&lo64, &low128, AR_ROUND_NEAREST);

        lo1 = *((double *) &lo64);
        hi1 = *((double *) &hi64);
        *out = lo1 + hi1;
        return (res);
}
 
/* Fortran interfaces for above 2 routines */
void ar_m128toi128_(AR_IEEE_128 *out, long double *in)
{
   (void) ar_m128toi128(out,in);
}

void ar_i128tom128_(AR_IEEE_128 *out, long double *in)
{
   (void) ar_i128tom128(out,in);
}
#endif

#endif /* __mips */
01344 
01345 /* Cray single strange rounding for "rounded integer division", per
01346  * code sequences in the CMCS backend.
01347  */
01348 int
01349 ar_crnd64 (AR_CRAY_64 *rnd,
01350                    const AR_CRAY_64 *flt) {
01351 
01352         int res;
01353         AR_CRAY_64 a = *flt;
01354 
01355         /* Construct a floating value with only the low-order four bits
01356          * left in the mantissa.
01357          */
01358         a = *flt;
01359         a.coeff0 = 0;
01360         a.coeff1 = 0;
01361         a.coeff2 &= 017;
01362 
01363         res = ar_cfadd64 (rnd, flt, &a);
01364 
01365         /* Trim off the low-order four bits */
01366         rnd->coeff2 &= ~017;
01367 
01368         return res;
01369 }
01370 
01371 int
01372 ar_crnd128 (AR_CRAY_128 *rnd,
01373             const AR_CRAY_128 *flt) {
01374 
01375         int res;
01376         AR_CRAY_128 a = *flt;
01377 
01378         /* Construct a floating value with only the low-order four bits
01379          * left in the mantissa.
01380          */
01381         a = *flt;
01382         a.coeff0 = 0;
01383         a.coeff1 = 0;
01384         a.coeff2 = 0;
01385         a.coeff3 = 0;
01386         a.coeff4 = 0;
01387         a.coeff5 &= 017;
01388 
01389         res = ar_cfadd128 (rnd, flt, &a);
01390 
01391         /* Trim off the low-order four bits */
01392         rnd->coeff5 &= ~017;
01393 
01394         return res;
01395 }
01396 
01397 
/* SCCS/RCS version-identification strings embedded in the object file;
 * they appear unused by the code visible in this file.
 */
static char USMID [] = "\n%Z%%M%        %I%     %G% %U%\n";
static char rcsid [] = "$Id: convert.c,v 1.2 2003-11-04 16:04:58 eraxxon Exp $";
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines