Open64 (mfef90, whirl2f, and IR tools)  TAG: version-openad; SVN changeset: 916
whirl2c.h
Go to the documentation of this file.
00001 /*
00002 
00003   Copyright (C) 2000, 2001 Silicon Graphics, Inc.  All Rights Reserved.
00004 
00005   This program is free software; you can redistribute it and/or modify it
00006   under the terms of version 2 of the GNU General Public License as
00007   published by the Free Software Foundation.
00008 
00009   This program is distributed in the hope that it would be useful, but
00010   WITHOUT ANY WARRANTY; without even the implied warranty of
00011   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
00012 
00013   Further, this software is distributed without any warranty that it is
00014   free of the rightful claim of any third person regarding infringement 
00015   or the like.  Any license provided herein, whether implied or 
00016   otherwise, applies only to this software file.  Patent licenses, if 
00017   any, provided herein do not apply to combinations of this program with 
00018   other software, or any other product whatsoever.  
00019 
00020   You should have received a copy of the GNU General Public License along
00021   with this program; if not, write the Free Software Foundation, Inc., 59
00022   Temple Place - Suite 330, Boston MA 02111-1307, USA.
00023 
00024   Contact information:  Silicon Graphics, Inc., 1600 Amphitheatre Pky,
00025   Mountain View, CA 94043, or:
00026 
00027   http://www.sgi.com
00028 
00029   For further information regarding this notice, see:
00030 
00031   http://oss.sgi.com/projects/GenInfo/NoticeExplan
00032 
00033 */
00034 
00035 
00036 #ifndef __WHIRL2C_H__
00037 #define __WHIRL2C_H__
00038 
00039 
00040 #include <string.h> /* Declares memmove */
00041 #ifdef _FORTRAN2C
00042 #include <libftn.h> /* Declares math and io functions */
00043 #else
00044 #include <math.h> /* Declares math functions */
00045 #endif /* _FORTRAN2C */
00046 
00047 /* use platform independent types from inttypes.h */
00048 #include <inttypes.h>
00049 /*----------- Types used in the whirl2c output files ---------- */
00050 
/* Placeholder emitted when whirl2c cannot reconstruct a type. */
typedef void __UNKNOWN_TYPE;

/* Boolean values are represented as a single byte. */
typedef char _BOOLEAN;

/* Fixed-width integral types, mapped onto the C99 <inttypes.h> names
 * so the emitted code is portable across platforms.
 */
typedef int8_t   _INT8;
typedef int16_t  _INT16;
typedef int32_t  _INT32;
typedef int64_t  _INT64;
typedef uint8_t  _UINT8;
typedef uint16_t _UINT16;
typedef uint32_t _UINT32;
typedef uint64_t _UINT64;

/* Floating-point types: 32- and 64-bit IEEE formats, plus the
 * extended "quad" format (long double on this target).
 */
typedef float       _IEEE32;
typedef double      _IEEE64;
typedef long double _QUAD;

/* Character strings are emitted as incomplete char arrays. */
typedef char _STRING[];
00067 
00068 #ifndef _FORTRAN2C
00069 
00070      /* Declare the complex types, since they are considered builtin
00071       * by whirl2c, even when the original source was a C program.
00072       */
00073 typedef struct {float r, i;} _COMPLEX32;
00074 typedef struct {double r, i;} _COMPLEX64;
00075 typedef struct {long double r, i;} _COMPLEXQD;
00076 
00077 #else /* defined(_FORTRAN2C) */
00078 
00079      /* Declare the complex types in terms of the libftn.h declarations
00080       * for the same.
00081       */
00082 typedef struct _cpx_float _COMPLEX32;
00083 typedef struct _cpx_double _COMPLEX64;
00084 typedef struct _cpx_long_double _COMPLEXQD;
00085 
00086      /* Declare a few temporary variables to be used for complex
00087       * arithmetics.
00088       */
00089 static _COMPLEXQD _Tmp_CQ, _Tmp1_CQ, _Tmp2_CQ;
00090 static _COMPLEX64 _Tmp_C8, _Tmp1_C8, _Tmp2_C8;
00091 static _COMPLEX32 _Tmp_C4, _Tmp1_C4, _Tmp2_C4;
00092 
00093 static const _COMPLEXQD _One_CQ = {1.0L, 0.0L};
00094 static const _COMPLEX64 _One_C8 = {1.0, 0.0};
00095 static const _COMPLEX32 _One_C4 = {1.0F, 0.0F};
00096 
00097 #endif /*_FORTRAN2C*/
00098 
00099 /*----------- Operator definitions ---------- */
00100 
/* Block copy of number_of_bytes bytes from (cpy_from + from_offset) to
 * (cpy_to + to_offset).  Uses memmove so overlapping regions are
 * handled correctly.
 * BUG FIX: the offsets were swapped -- from_offset was applied to the
 * destination and to_offset to the source.  Arguments are now also
 * parenthesized so pointer-arithmetic expressions expand safely.
 */
#define __MSTORE(cpy_from, from_offset, cpy_to, to_offset, number_of_bytes) \
   (void)memmove((char *)(cpy_to) + (to_offset), \
                 (char *)(cpy_from) + (from_offset), (number_of_bytes))
00104 
/* Complex negation: copy the operand into the shared temporary, then
 * flip the sign of both components.  The temporary guarantees the
 * operand expression is evaluated exactly once.
 */
#define _C4NEG(v) \
   (_Tmp_C4 = (v), _Tmp_C4.realpart = -_Tmp_C4.realpart, \
    _Tmp_C4.imagpart = -_Tmp_C4.imagpart, _Tmp_C4)
#define _C8NEG(v) \
   (_Tmp_C8 = (v), _Tmp_C8.realpart = -_Tmp_C8.realpart, \
    _Tmp_C8.imagpart = -_Tmp_C8.imagpart, _Tmp_C8)
#define _CQNEG(v) \
   (_Tmp_CQ = (v), _Tmp_CQ.realpart = -_Tmp_CQ.realpart, \
    _Tmp_CQ.imagpart = -_Tmp_CQ.imagpart, _Tmp_CQ)

/* Absolute value.  NOTE: the scalar forms evaluate their argument
 * twice, so side-effecting arguments must be avoided (presumably
 * whirl2c only emits simple operands here -- confirm).  Complex
 * magnitudes are delegated to the Fortran runtime (c_abs_/z_abs_) and
 * the quad support routine (__cq_abs).
 */
#define _I4ABS(v) ((v) > 0? (v) : -(v))
#define _I8ABS(v) ((v) > 0LL? (v) : -(v))
#define _F4ABS(v) ((v) > 0.0F? (v) : -(v))
#define _F8ABS(v) ((v) > 0.0? (v) : -(v))
#define _FQABS(v) ((v) > 0.0L? (v) : -(v))
#define _C4ABS(v) (_Tmp1_C4 = (v), c_abs_(&_Tmp1_C4))
#define _C8ABS(v) (_Tmp1_C8 = (v), z_abs_(&_Tmp1_C8))
#define _CQABS(v) (_Tmp1_CQ = (v), __cq_abs(&_Tmp1_CQ))
00132 
/* Square root operators.  _F4SQRT/_F8SQRT use the standard C math
 * library; _FQSQRT and the complex variants rely on libm/libftn
 * support routines.
 * BUG FIX: _F8SQRT was sqrtf(v), computing a single-precision result
 * for a double operand; it is now sqrt(v).  _F4SQRT uses C99 sqrtf
 * (the previous fsqrt is a non-ANSI SGI libm extension not declared
 * by <math.h>).
 */
#define _F4SQRT(v) sqrtf(v)
#define _F8SQRT(v) sqrt(v)
#define _FQSQRT(v) qsqrt(v)  /* libm extension (need to compile -xansi) */
#define _C4SQRT(v) (_Tmp1_C4 = (v), c_sqrt(&_Tmp_C4, &_Tmp1_C4), _Tmp_C4)
#define _C8SQRT(v) (_Tmp1_C8 = (v), z_sqrt(&_Tmp_C8, &_Tmp1_C8), _Tmp_C8)
#define _CQSQRT(v) (_Tmp1_CQ = (v), __cq_sqrt(&_Tmp_CQ, &_Tmp1_CQ), _Tmp_CQ)
00139 
/* Float -> integer rounding, half away from zero.  This mirrors the
 * algorithm the compilers use when constant folding these operators:
 * add (or subtract, for negatives) 0.5 and truncate.  The generic
 * _T1F*RND forms take the target integral type t1 as a parameter; the
 * named variants below instantiate them for each source/target pair.
 */
#define _T1F4RND(v, t1) ((v) >= 0.0F? (t1)((v)+0.5F) : (t1)((v)-0.5F))
#define _T1F8RND(v, t1) ((v) >= 0.0? (t1)((v)+0.5) : (t1)((v)-0.5))
#define _T1FQRND(v, t1) ((v) >= 0.0L? (t1)((v)+0.5L) : (t1)((v)-0.5L))
#define _I4F4RND(v) _T1F4RND(v, _INT32)
#define _I4F8RND(v) _T1F8RND(v, _INT32)
#define _I4FQRND(v) _T1FQRND(v, _INT32)
#define _U4F4RND(v) _T1F4RND(v, _UINT32)
#define _U4F8RND(v) _T1F8RND(v, _UINT32)
#define _U4FQRND(v) _T1FQRND(v, _UINT32)
#define _I8F4RND(v) _T1F4RND(v, _INT64)
#define _I8F8RND(v) _T1F8RND(v, _INT64)
#define _I8FQRND(v) _T1FQRND(v, _INT64)
#define _U8F4RND(v) _T1F4RND(v, _UINT64)
#define _U8F8RND(v) _T1F8RND(v, _UINT64)
#define _U8FQRND(v) _T1FQRND(v, _UINT64)
00160 
/* Float -> integer truncation (round toward zero).  A plain C cast
 * already has exactly these semantics, so each operator is just a
 * cast to the target integral type.
 */
#define _I4F4TRUNC(v) (_INT32)(v)
#define _I4F8TRUNC(v) (_INT32)(v)
#define _I4FQTRUNC(v) (_INT32)(v)
#define _U4F4TRUNC(v) (_UINT32)(v)
#define _U4F8TRUNC(v) (_UINT32)(v)
#define _U4FQTRUNC(v) (_UINT32)(v)
#define _I8F4TRUNC(v) (_INT64)(v)
#define _I8F8TRUNC(v) (_INT64)(v)
#define _I8FQTRUNC(v) (_INT64)(v)
#define _U8F4TRUNC(v) (_UINT64)(v)
#define _U8F8TRUNC(v) (_UINT64)(v)
#define _U8FQTRUNC(v) (_UINT64)(v)
00173 
/* Float -> integer ceiling: truncate, then add one whenever the
 * truncated value (converted back to the floating type t2) fell below
 * the operand.  t1 = target integral type, t2 = source float type.
 * NOTE: v is evaluated multiple times.
 */
#define _T1T2CEIL(v, t1, t2) ((t2)(t1)(v) < (v)? (t1)(v)+1 : (t1)(v))
#define _I4F4CEIL(v) _T1T2CEIL(v, _INT32, _IEEE32)
#define _I4F8CEIL(v) _T1T2CEIL(v, _INT32, _IEEE64)
#define _I4FQCEIL(v) _T1T2CEIL(v, _INT32, _QUAD)
#define _U4F4CEIL(v) _T1T2CEIL(v, _UINT32, _IEEE32)
#define _U4F8CEIL(v) _T1T2CEIL(v, _UINT32, _IEEE64)
#define _U4FQCEIL(v) _T1T2CEIL(v, _UINT32, _QUAD)
#define _I8F4CEIL(v) _T1T2CEIL(v, _INT64, _IEEE32)
#define _I8F8CEIL(v) _T1T2CEIL(v, _INT64, _IEEE64)
#define _I8FQCEIL(v) _T1T2CEIL(v, _INT64, _QUAD)
#define _U8F4CEIL(v) _T1T2CEIL(v, _UINT64, _IEEE32)
#define _U8F8CEIL(v) _T1T2CEIL(v, _UINT64, _IEEE64)
#define _U8FQCEIL(v) _T1T2CEIL(v, _UINT64, _QUAD)

/* Float -> integer floor: truncate, then subtract one whenever the
 * truncated value exceeds the operand (negative operand with a
 * fractional part).
 */
#define _T1T2FLOOR(v, t1, t2) ((t2)(t1)(v) > (v)? (t1)(v)-1 : (t1)(v))
#define _I4F4FLOOR(v) _T1T2FLOOR(v, _INT32, _IEEE32)
#define _I4F8FLOOR(v) _T1T2FLOOR(v, _INT32, _IEEE64)
#define _I4FQFLOOR(v) _T1T2FLOOR(v, _INT32, _QUAD)
#define _U4F4FLOOR(v) _T1T2FLOOR(v, _UINT32, _IEEE32)
#define _U4F8FLOOR(v) _T1T2FLOOR(v, _UINT32, _IEEE64)
#define _U4FQFLOOR(v) _T1T2FLOOR(v, _UINT32, _QUAD)
#define _I8F4FLOOR(v) _T1T2FLOOR(v, _INT64, _IEEE32)
#define _I8F8FLOOR(v) _T1T2FLOOR(v, _INT64, _IEEE64)
#define _I8FQFLOOR(v) _T1T2FLOOR(v, _INT64, _QUAD)
#define _U8F4FLOOR(v) _T1T2FLOOR(v, _UINT64, _IEEE32)
#define _U8F8FLOOR(v) _T1T2FLOOR(v, _UINT64, _IEEE64)
#define _U8FQFLOOR(v) _T1T2FLOOR(v, _UINT64, _QUAD)
00201 
/* Complex addition: stage v1 in the shared temporary, then accumulate
 * v2 componentwise.  Subtraction follows the same pattern.
 */
#define _C4ADD(v1, v2) \
   (_Tmp_C4 = (v1), _Tmp_C4.realpart += (v2).realpart, \
    _Tmp_C4.imagpart += (v2).imagpart, _Tmp_C4)
#define _C8ADD(v1, v2) \
   (_Tmp_C8 = (v1), _Tmp_C8.realpart += (v2).realpart, \
    _Tmp_C8.imagpart += (v2).imagpart, _Tmp_C8)
#define _CQADD(v1, v2) \
   (_Tmp_CQ = (v1), _Tmp_CQ.realpart += (v2).realpart, \
    _Tmp_CQ.imagpart += (v2).imagpart, _Tmp_CQ)

#define _C4SUB(v1, v2) \
   (_Tmp_C4 = (v1), _Tmp_C4.realpart -= (v2).realpart, \
    _Tmp_C4.imagpart -= (v2).imagpart, _Tmp_C4)
#define _C8SUB(v1, v2) \
   (_Tmp_C8 = (v1), _Tmp_C8.realpart -= (v2).realpart, \
    _Tmp_C8.imagpart -= (v2).imagpart, _Tmp_C8)
#define _CQSUB(v1, v2) \
   (_Tmp_CQ = (v1), _Tmp_CQ.realpart -= (v2).realpart, \
    _Tmp_CQ.imagpart -= (v2).imagpart, _Tmp_CQ)

/* Complex multiplication: (a+bi)(c+di) = (ac-bd) + (ad+bc)i.
 * NOTE(review): unlike ADD/SUB the operands are not copied first, so
 * passing the shared temporary itself as v1/v2 would corrupt the
 * result, and both operands are evaluated twice -- presumably whirl2c
 * never emits such calls; confirm.
 */
#define _C4MPY(v1, v2) \
   (_Tmp_C4.realpart = (v1).realpart*(v2).realpart - (v1).imagpart*(v2).imagpart, \
    _Tmp_C4.imagpart = (v1).realpart*(v2).imagpart + (v1).imagpart*(v2).realpart, \
    _Tmp_C4)
#define _C8MPY(v1, v2) \
   (_Tmp_C8.realpart = (v1).realpart*(v2).realpart - (v1).imagpart*(v2).imagpart, \
    _Tmp_C8.imagpart = (v1).realpart*(v2).imagpart + (v1).imagpart*(v2).realpart, \
    _Tmp_C8)
#define _CQMPY(v1, v2) \
   (_Tmp_CQ.realpart = (v1).realpart*(v2).realpart - (v1).imagpart*(v2).imagpart, \
    _Tmp_CQ.imagpart = (v1).realpart*(v2).imagpart + (v1).imagpart*(v2).realpart, \
    _Tmp_CQ)

/* Complex division is delegated to the Fortran runtime (c_div/z_div)
 * and the quad support routine (__cq_div); operands are staged in
 * temporaries so their addresses can be taken.
 */
#define _C4DIV(v1, v2) \
   (_Tmp1_C4 = (v1), _Tmp2_C4 = (v2), \
    c_div(&_Tmp_C4, &_Tmp1_C4, &_Tmp2_C4), _Tmp_C4)
#define _C8DIV(v1, v2) \
   (_Tmp1_C8 = (v1), _Tmp2_C8 = (v2), \
    z_div(&_Tmp_C8, &_Tmp1_C8, &_Tmp2_C8), _Tmp_C8)
#define _CQDIV(v1, v2) \
   (_Tmp1_CQ = (v1), _Tmp2_CQ = (v2), \
    __cq_div(&_Tmp_CQ, &_Tmp1_CQ, &_Tmp2_CQ), _Tmp_CQ)
00259 
/* Fortran-style integer modulus (this should only occur for Fortran
 * programs).  The result is undefined for v2 == 0.  C's remainder (%)
 * carries the sign of the dividend, whereas the modulus here carries
 * the sign of the divisor: when the remainder is nonzero and exactly
 * one operand is negative ((v1>0) ^ (v2>0)), correct it by adding v2;
 * otherwise the plain remainder is already the modulus.
 * NOTE: v1 and v2 are evaluated several times.
 */
#define _I4MOD(v1, v2) \
   ((((v1)%(v2) != 0) && ((v1)>0) ^ ((v2)>0))? (((v1)%(v2)) + (v2)) : ((v1)%(v2)))
#define _I8MOD(v1, v2) \
   ((((v1)%(v2) != 0LL) && ((v1)>0LL) ^ ((v2)>0LL))? (((v1)%(v2)) + (v2)) : ((v1)%(v2)))
00271 
/*
 * INTRN_DIVFLOOR(x,y) / INTRN_DIVCEIL(x,y), x and y integers:
 *
 * x y             INTRN_DIVFLOOR          INTRN_DIVCEIL
 * ---             --------------          -------------
 * + +                 x / y                (x+y-1) / y
 * - -                 x / y                (x+y+1) / y
 * + -              (x+ -1-y)/y                x / y
 * - +              (x+  1-y)/y                x / y
 *
 * Evaluate (divfloor) without branch code, using:
 *
 *    f(y) => ((y<0)? -1 : +1) => ((y>>31)<<1) + 1
 *
 *    MASK(x,y,v) => (x>=0 && y>=0) || (x<0 && y<0)? 0 : v => ((x^y)>>31) & v
 *
 * The cleverness (Shapiro's) was the composition of these functions
 * to evaluate divfloor:
 *
 *    DIVFLOOR(x,y) = (x + MASK(x, y, f(y) - y)) / y
 *
 * where:
 *
 *    (f(y) - y) => (-1-y) [+-],   (+1-y) [-+]
 *
 * NOTE: this relies on '>>' sign-extending negative values, the same
 * non-strict ANSI assumption made for _I4ASHR below.
 *
 * BUG FIX: the _I4/_I8 DIVFLOOR and _U4/_U8 DIVCEIL expansions are now
 * fully parenthesized.  Previously "(v2)*_I4DIVFLOOR(v1,v2)" inside
 * the MOD macros associated as ((v2)*numerator)/(v2), multiplying
 * before the truncating division, so e.g. _I4MODFLOOR(7,3) evaluated
 * to 0 instead of 1.
 */
#define _I4DIVFLOOR_SIGN(y) ((((y)>>31)<<1) + 1) /* ((y<0)? -1 : +1) */
#define _I4DIVFLOOR_MASK(x, y, v) ((((x)^(y))>>31) & (v))
#define _I8DIVFLOOR_SIGN(y) ((((y)>>63)<<1) + 1LL) /* ((y<0)? -1 : +1) */
#define _I8DIVFLOOR_MASK(x, y, v) ((((x)^(y))>>63) & (v))

#define _I4DIVFLOOR(v1, v2) \
   (((v1) + _I4DIVFLOOR_MASK(v1, v2, _I4DIVFLOOR_SIGN(v2) - (v2))) / (v2))
#define _I8DIVFLOOR(v1, v2) \
   (((v1) + _I8DIVFLOOR_MASK(v1, v2, _I8DIVFLOOR_SIGN(v2) - (v2))) / (v2))
#define _U4DIVFLOOR(v1, v2) ((v1)/(v2))
#define _U8DIVFLOOR(v1, v2) ((v1)/(v2))

/* DIVCEIL(x,y) = -DIVFLOOR(-x,y); the unsigned forms add y-1 first. */
#define _I4DIVCEIL(v1, v2) (-_I4DIVFLOOR(-(v1), (v2)))
#define _I8DIVCEIL(v1, v2) (-_I8DIVFLOOR(-(v1), (v2)))
#define _U4DIVCEIL(v1, v2) (((v1)+(v2)-1)/(v2))
#define _U8DIVCEIL(v1, v2) (((v1)+(v2)-1)/(v2))

/* The MOD forms follow from the DIV forms: v1 - v2*DIV(v1,v2). */
#define _I4MODFLOOR(v1, v2) ((v1) - (v2)*_I4DIVFLOOR((v1), (v2)))
#define _I8MODFLOOR(v1, v2) ((v1) - (v2)*_I8DIVFLOOR((v1), (v2)))
#define _U4MODFLOOR(v1, v2) ((v1) - (v2)*((v1)/(v2)))
#define _U8MODFLOOR(v1, v2) ((v1) - (v2)*((v1)/(v2)))

#define _I4MODCEIL(v1, v2) ((v1) - (v2)*_I4DIVCEIL((v1), (v2)))
#define _I8MODCEIL(v1, v2) ((v1) - (v2)*_I8DIVCEIL((v1), (v2)))
#define _U4MODCEIL(v1, v2) ((v1) - (v2)*_U4DIVCEIL((v1), (v2)))
#define _U8MODCEIL(v1, v2) ((v1) - (v2)*_U8DIVCEIL((v1), (v2)))
00329 
/* Two-operand MAX/MIN for every arithmetic type.  NOTE: both operands
 * are evaluated twice, and when the comparison is false (including the
 * case where a floating operand is NaN) the second operand is the
 * result.
 */
#define _I4MAX(v1, v2) ((v1) > (v2)? (v1) : (v2))
#define _I8MAX(v1, v2) ((v1) > (v2)? (v1) : (v2))
#define _U4MAX(v1, v2) ((v1) > (v2)? (v1) : (v2))
#define _U8MAX(v1, v2) ((v1) > (v2)? (v1) : (v2))
#define _F4MAX(v1, v2) ((v1) > (v2)? (v1) : (v2))
#define _F8MAX(v1, v2) ((v1) > (v2)? (v1) : (v2))
#define _FQMAX(v1, v2) ((v1) > (v2)? (v1) : (v2))

#define _I4MIN(v1, v2) ((v1) < (v2)? (v1) : (v2))
#define _I8MIN(v1, v2) ((v1) < (v2)? (v1) : (v2))
#define _U4MIN(v1, v2) ((v1) < (v2)? (v1) : (v2))
#define _U8MIN(v1, v2) ((v1) < (v2)? (v1) : (v2))
#define _F4MIN(v1, v2) ((v1) < (v2)? (v1) : (v2))
#define _F8MIN(v1, v2) ((v1) < (v2)? (v1) : (v2))
#define _FQMIN(v1, v2) ((v1) < (v2)? (v1) : (v2))
00345 
/* Shift operators.  The count is masked to the operand width
 * (mod 32 / mod 64) so over-wide counts behave like the hardware
 * instead of being undefined.
 */
#define _I4SHL(v1, v2) ((v1) << ((v2) & 31))
#define _I8SHL(v1, v2) ((v1) << ((v2) & 63LL))
#define _U4SHL(v1, v2) ((v1) << ((v2) & 31))
#define _U8SHL(v1, v2) ((v1) << ((v2) & 63LL))

/* For right shifts we make the non-strict ANSI assumption that '>>'
 * sign-extends signed operands and zero-extends unsigned ones.
 * Arithmetic shifts (ASHR) therefore route through the signed type,
 * logical shifts (LSHR) through the unsigned type, with a cast back
 * to the requested result type.
 */
#define _I4ASHR(v1, v2) ((v1) >> ((v2) & 31))
#define _I8ASHR(v1, v2) ((v1) >> ((v2) & 63LL))
#define _U4ASHR(v1, v2) (_UINT32)((_INT32)(v1) >> ((v2) & 31))
#define _U8ASHR(v1, v2) (_UINT64)((_INT64)(v1) >> ((v2) & 63LL))

#define _I4LSHR(v1, v2) (_INT32)((_UINT32)(v1) >> ((v2) & 31))
#define _I8LSHR(v1, v2) (_INT64)((_UINT64)(v1) >> ((v2) & 63LL))
#define _U4LSHR(v1, v2) ((v1) >> ((v2) & 31))
#define _U8LSHR(v1, v2) ((v1) >> ((v2) & 63LL))
00364 
/* Reciprocal and reciprocal-square-root operators.  The complex forms
 * divide the complex constant one (_One_C*) by the operand via the
 * corresponding _C*DIV macro.
 * BUG FIX: the scalar forms now parenthesize the argument; previously
 * _F8RECIP(a+b) expanded to 1.0/a+b instead of 1.0/(a+b).
 */
#define _F4RECIP(v) (1.0F/(v))
#define _F8RECIP(v) (1.0/(v))
#define _FQRECIP(v) (1.0L/(v))
#define _C4RECIP(v) _C4DIV(_One_C4, (v))
#define _C8RECIP(v) _C8DIV(_One_C8, (v))
#define _CQRECIP(v) _CQDIV(_One_CQ, (v))

#define _F4RSQRT(v) (1.0F/_F4SQRT(v))
#define _F8RSQRT(v) (1.0/_F8SQRT(v))
#define _FQRSQRT(v) (1.0L/_FQSQRT(v))
#define _C4RSQRT(v) (_Tmp_C4 = _C4SQRT(v), _C4DIV(_One_C4, _Tmp_C4))
#define _C8RSQRT(v) (_Tmp_C8 = _C8SQRT(v), _C8DIV(_One_C8, _Tmp_C8))
#define _CQRSQRT(v) (_Tmp_CQ = _CQSQRT(v), _CQDIV(_One_CQ, _Tmp_CQ))
00378 
00379 #endif /* __WHIRL2C_H__ */
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Defines