- fixed soft float support so megaglest might now work with other architectures at least in terms of streflop support

This commit is contained in:
Mark Vejvoda 2012-02-11 15:20:40 +00:00
parent 2fbebb4721
commit 4ed3050f84
9 changed files with 7722 additions and 2 deletions

View File

@ -2523,7 +2523,23 @@ int glestMain(int argc, char** argv) {
#ifdef USE_STREFLOP
//# define STREFLOP_NO_DENORMALS
// streflop_init<streflop::Simple>();
printf(" - using STREFLOP\n");
const char *instruction_set = "[none]";
const char *denormals = "[denormals]";
#if defined(STREFLOP_SSE)
instruction_set = "[SSE]";
#elif defined(STREFLOP_X87)
instruction_set = "[X87]";
#elif defined(STREFLOP_SOFT)
instruction_set = "[SOFTFLOAT]";
#endif
#if defined(STREFLOP_NO_DENORMALS)
denormals = "[no-denormals]";
#endif
printf(" - using STREFLOP %s - %s\n",instruction_set,denormals);
#endif
}

View File

@ -237,7 +237,8 @@ IF(BUILD_MEGAGLEST_MODEL_VIEWER OR BUILD_MEGAGLEST_MAP_EDITOR OR BUILD_MEGAGLEST
SET(DIRS_WITH_SRC
${DIRS_WITH_SRC}
streflop
streflop/libm_flt32_source)
streflop/libm_flt32_source
streflop/softfloat)
ENDIF()
IF(NOT WANT_STATIC_LIBS)

View File

@ -0,0 +1,300 @@
/*
streflop: STandalone REproducible FLOating-Point
Nicolas Brodu, 2006
Code released according to the GNU Lesser General Public License
Heavily relies on GNU Libm, itself depending on netlib fplibm, GNU MP, and IBM MP lib.
Uses SoftFloat too.
Please read the history and copyright information in the documentation provided with the source code
*/
#ifdef STREFLOP_SOFT
#ifndef STREFLOP_SOFT_WRAPPER_H
#define STREFLOP_SOFT_WRAPPER_H
/// This file is independent from SoftFloat itself
/// This way, the user programs are clean from SoftFloat details
/// Only the template declarations are done here
/// The template instantiations for N = 32, 64, 96 (bits) are done in the CPP file
/// this way, only these types will have defined symbols
/// This file should be included from within a streflop namespace
}
#include "IntegerTypes.h"
namespace streflop {
// Generic define
/// Fixed-size storage wrapper that stands in for a hardware floating-point
/// type when STREFLOP_SOFT is active. Nbits is the value width in bits
/// (instantiated below for 32, 64 and 96). All operators are only declared
/// here; the definitions are explicit specializations in the CPP file.
template<int Nbits> struct SoftFloatWrapper {
// Raw byte buffer holding the SoftFloat representation of the value.
char holder[(Nbits/STREFLOP_INTEGER_TYPES_CHAR_BITS)];
// Reinterpret the holder as the SoftFloat-internal type T.
// NOTE(review): reinterpret_cast type punning - relies on the compiler
// tolerating the strict-aliasing violation; confirm build flags.
template<typename T> inline T& value() {return *reinterpret_cast<T*>(&holder);}
template<typename T> inline const T& value() const {return *reinterpret_cast<const T*>(&holder);}
/// Use dummy bool argument for construction from an already initialized holder
template<typename T> inline SoftFloatWrapper(T init_value, bool) {value<T>() = init_value;}
/// Uninitialized object
inline SoftFloatWrapper() {}
/// Conversion between different types. Also unfortunately includes otherwise perfectly fine copy constructor
SoftFloatWrapper(const SoftFloatWrapper<32>& f);
SoftFloatWrapper& operator=(const SoftFloatWrapper<32>& f);
SoftFloatWrapper(const SoftFloatWrapper<64>& f);
SoftFloatWrapper& operator=(const SoftFloatWrapper<64>& f);
SoftFloatWrapper(const SoftFloatWrapper<96>& f);
SoftFloatWrapper& operator=(const SoftFloatWrapper<96>& f);
/// Destructor
inline ~SoftFloatWrapper() {}
/// Now the real fun, arithmetic operator overloading
// In-place arithmetic and comparisons against another wrapper of the same
// width; declaration only, per-width definitions live in the CPP file.
SoftFloatWrapper& operator+=(const SoftFloatWrapper& f);
SoftFloatWrapper& operator-=(const SoftFloatWrapper& f);
SoftFloatWrapper& operator*=(const SoftFloatWrapper& f);
SoftFloatWrapper& operator/=(const SoftFloatWrapper& f);
bool operator==(const SoftFloatWrapper& f) const;
bool operator!=(const SoftFloatWrapper& f) const;
bool operator<(const SoftFloatWrapper& f) const;
bool operator<=(const SoftFloatWrapper& f) const;
bool operator>(const SoftFloatWrapper& f) const;
bool operator>=(const SoftFloatWrapper& f) const;
// Declares construction, assignment, implicit conversion, compound
// arithmetic and comparisons against one native scalar type. Applied below
// for every native integral and floating type.
#define STREFLOP_SOFT_WRAPPER_NATIVE_OPS(native_type) \
SoftFloatWrapper(const native_type f); \
SoftFloatWrapper& operator=(const native_type f); \
operator native_type() const; \
SoftFloatWrapper& operator+=(const native_type f); \
SoftFloatWrapper& operator-=(const native_type f); \
SoftFloatWrapper& operator*=(const native_type f); \
SoftFloatWrapper& operator/=(const native_type f); \
bool operator==(const native_type f) const; \
bool operator!=(const native_type f) const; \
bool operator<(const native_type f) const; \
bool operator<=(const native_type f) const; \
bool operator>(const native_type f) const; \
bool operator>=(const native_type f) const;
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(float)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(double)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(long double)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(char)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(unsigned char)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(short)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(unsigned short)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(int)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(unsigned int)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(long)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(unsigned long)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(long long)
STREFLOP_SOFT_WRAPPER_NATIVE_OPS(unsigned long long)
};
// Declares the member-operator specializations for one width N; the
// definitions are in the CPP file so only these widths get symbols.
// NOTE(review): operator< is declared twice in this macro (first and third
// declarations) - redundant but harmless.
#define STREFLOP_SOFT_WRAPPER_MAKE_REAL_CLASS_OPS(N) \
template<> bool SoftFloatWrapper<N>::operator<(const SoftFloatWrapper& f) const; \
template<> SoftFloatWrapper<N>& SoftFloatWrapper<N>::operator+=(const SoftFloatWrapper<N>& f); \
template<> SoftFloatWrapper<N>& SoftFloatWrapper<N>::operator-=(const SoftFloatWrapper<N>& f); \
template<> SoftFloatWrapper<N>& SoftFloatWrapper<N>::operator*=(const SoftFloatWrapper<N>& f); \
template<> SoftFloatWrapper<N>& SoftFloatWrapper<N>::operator/=(const SoftFloatWrapper<N>& f); \
template<> bool SoftFloatWrapper<N>::operator==(const SoftFloatWrapper<N>& f) const; \
template<> bool SoftFloatWrapper<N>::operator!=(const SoftFloatWrapper<N>& f) const; \
template<> bool SoftFloatWrapper<N>::operator<(const SoftFloatWrapper<N>& f) const; \
template<> bool SoftFloatWrapper<N>::operator<=(const SoftFloatWrapper<N>& f) const; \
template<> bool SoftFloatWrapper<N>::operator>(const SoftFloatWrapper<N>& f) const; \
template<> bool SoftFloatWrapper<N>::operator>=(const SoftFloatWrapper<N>& f) const;
STREFLOP_SOFT_WRAPPER_MAKE_REAL_CLASS_OPS(32)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_CLASS_OPS(64)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_CLASS_OPS(96)
// Making real the conversion N->M
// Have to include default constructors for template overloading reasons
#define STREFLOP_SOFT_WRAPPER_MAKE_REAL_NM_CONVERSION(Nbits) \
template<> SoftFloatWrapper<Nbits>::SoftFloatWrapper(const SoftFloatWrapper<32>& f); \
template<> SoftFloatWrapper<Nbits>& SoftFloatWrapper<Nbits>::operator=(const SoftFloatWrapper<32>& f); \
template<> SoftFloatWrapper<Nbits>::SoftFloatWrapper(const SoftFloatWrapper<64>& f); \
template<> SoftFloatWrapper<Nbits>& SoftFloatWrapper<Nbits>::operator=(const SoftFloatWrapper<64>& f); \
template<> SoftFloatWrapper<Nbits>::SoftFloatWrapper(const SoftFloatWrapper<96>& f); \
template<> SoftFloatWrapper<Nbits>& SoftFloatWrapper<Nbits>::operator=(const SoftFloatWrapper<96>& f);
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NM_CONVERSION(32)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NM_CONVERSION(64)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NM_CONVERSION(96)
// Specialization declarations of the per-native-type member operations for
// one width N; expanded below for all 3 widths x 13 native types.
#define STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(N,native_type) \
template<> SoftFloatWrapper<N>::SoftFloatWrapper(const native_type f); \
template<> SoftFloatWrapper<N>& SoftFloatWrapper<N>::operator=(const native_type f); \
template<> SoftFloatWrapper<N>::operator native_type() const; \
template<> SoftFloatWrapper<N>& SoftFloatWrapper<N>::operator+=(const native_type f); \
template<> SoftFloatWrapper<N>& SoftFloatWrapper<N>::operator-=(const native_type f); \
template<> SoftFloatWrapper<N>& SoftFloatWrapper<N>::operator*=(const native_type f); \
template<> SoftFloatWrapper<N>& SoftFloatWrapper<N>::operator/=(const native_type f); \
template<> bool SoftFloatWrapper<N>::operator==(const native_type f) const; \
template<> bool SoftFloatWrapper<N>::operator!=(const native_type f) const; \
template<> bool SoftFloatWrapper<N>::operator<(const native_type f) const; \
template<> bool SoftFloatWrapper<N>::operator<=(const native_type f) const; \
template<> bool SoftFloatWrapper<N>::operator>(const native_type f) const; \
template<> bool SoftFloatWrapper<N>::operator>=(const native_type f) const;
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,float)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,long double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,unsigned char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,unsigned short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,unsigned int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,unsigned long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(32,unsigned long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,float)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,long double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,unsigned char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,unsigned short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,unsigned int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,unsigned long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(64,unsigned long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,float)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,long double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,unsigned char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,unsigned short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,unsigned int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,unsigned long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_NATIVE_OPS(96,unsigned long long)
// Generic versions are fine here, specializations in the cpp
/// binary operators
template<int N> SoftFloatWrapper<N> operator+(const SoftFloatWrapper<N>& f1, const SoftFloatWrapper<N>& f2);
template<int N> SoftFloatWrapper<N> operator-(const SoftFloatWrapper<N>& f1, const SoftFloatWrapper<N>& f2);
template<int N> SoftFloatWrapper<N> operator*(const SoftFloatWrapper<N>& f1, const SoftFloatWrapper<N>& f2);
template<int N> SoftFloatWrapper<N> operator/(const SoftFloatWrapper<N>& f1, const SoftFloatWrapper<N>& f2);
// Specialization declarations of the wrapper-wrapper binary operators.
#define STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_CLASS_OPS(N) \
template<> SoftFloatWrapper<N> operator+(const SoftFloatWrapper<N>& f1, const SoftFloatWrapper<N>& f2); \
template<> SoftFloatWrapper<N> operator-(const SoftFloatWrapper<N>& f1, const SoftFloatWrapper<N>& f2); \
template<> SoftFloatWrapper<N> operator*(const SoftFloatWrapper<N>& f1, const SoftFloatWrapper<N>& f2); \
template<> SoftFloatWrapper<N> operator/(const SoftFloatWrapper<N>& f1, const SoftFloatWrapper<N>& f2);
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_CLASS_OPS(32)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_CLASS_OPS(64)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_CLASS_OPS(96)
// Generic declarations of the mixed wrapper/native binary operators and the
// native-on-the-left comparisons (the wrapper-on-the-left comparisons are
// members of the struct above).
#define STREFLOP_SOFT_WRAPPER_BINARY_OPS(native_type) \
template<int N> SoftFloatWrapper<N> operator+(const SoftFloatWrapper<N>& f1, const native_type f2); \
template<int N> SoftFloatWrapper<N> operator-(const SoftFloatWrapper<N>& f1, const native_type f2); \
template<int N> SoftFloatWrapper<N> operator*(const SoftFloatWrapper<N>& f1, const native_type f2); \
template<int N> SoftFloatWrapper<N> operator/(const SoftFloatWrapper<N>& f1, const native_type f2); \
template<int N> SoftFloatWrapper<N> operator+(const native_type f1, const SoftFloatWrapper<N>& f2); \
template<int N> SoftFloatWrapper<N> operator-(const native_type f1, const SoftFloatWrapper<N>& f2); \
template<int N> SoftFloatWrapper<N> operator*(const native_type f1, const SoftFloatWrapper<N>& f2); \
template<int N> SoftFloatWrapper<N> operator/(const native_type f1, const SoftFloatWrapper<N>& f2); \
template<int N> bool operator==(const native_type value, const SoftFloatWrapper<N>& f); \
template<int N> bool operator!=(const native_type value, const SoftFloatWrapper<N>& f); \
template<int N> bool operator<(const native_type value, const SoftFloatWrapper<N>& f); \
template<int N> bool operator<=(const native_type value, const SoftFloatWrapper<N>& f); \
template<int N> bool operator>(const native_type value, const SoftFloatWrapper<N>& f); \
template<int N> bool operator>=(const native_type value, const SoftFloatWrapper<N>& f);
STREFLOP_SOFT_WRAPPER_BINARY_OPS(float)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(double)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(long double)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(char)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(unsigned char)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(short)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(unsigned short)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(int)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(unsigned int)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(long)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(unsigned long)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(long long)
STREFLOP_SOFT_WRAPPER_BINARY_OPS(unsigned long long)
// Specialization declarations matching the generic set above.
#define STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(N,native_type) \
template<> SoftFloatWrapper<N> operator+(const SoftFloatWrapper<N>& f1, const native_type f2); \
template<> SoftFloatWrapper<N> operator-(const SoftFloatWrapper<N>& f1, const native_type f2); \
template<> SoftFloatWrapper<N> operator*(const SoftFloatWrapper<N>& f1, const native_type f2); \
template<> SoftFloatWrapper<N> operator/(const SoftFloatWrapper<N>& f1, const native_type f2); \
template<> SoftFloatWrapper<N> operator+(const native_type f1, const SoftFloatWrapper<N>& f2); \
template<> SoftFloatWrapper<N> operator-(const native_type f1, const SoftFloatWrapper<N>& f2); \
template<> SoftFloatWrapper<N> operator*(const native_type f1, const SoftFloatWrapper<N>& f2); \
template<> SoftFloatWrapper<N> operator/(const native_type f1, const SoftFloatWrapper<N>& f2); \
template<> bool operator==(const native_type value, const SoftFloatWrapper<N>& f); \
template<> bool operator!=(const native_type value, const SoftFloatWrapper<N>& f); \
template<> bool operator<(const native_type value, const SoftFloatWrapper<N>& f); \
template<> bool operator<=(const native_type value, const SoftFloatWrapper<N>& f); \
template<> bool operator>(const native_type value, const SoftFloatWrapper<N>& f); \
template<> bool operator>=(const native_type value, const SoftFloatWrapper<N>& f);
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,float)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,long double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,unsigned char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,unsigned short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,unsigned int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,unsigned long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(32,unsigned long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,float)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,long double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,unsigned char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,unsigned short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,unsigned int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,unsigned long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(64,unsigned long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,float)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,long double)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,unsigned char)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,unsigned short)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,unsigned int)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,unsigned long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,long long)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_BINARY_OPS(96,unsigned long long)
/// Unary operators
template<int N> SoftFloatWrapper<N> operator-(const SoftFloatWrapper<N>& f);
template<int N> SoftFloatWrapper<N> operator+(const SoftFloatWrapper<N>& f);
// Specialization declarations of the unary operators.
#define STREFLOP_SOFT_WRAPPER_MAKE_REAL_UNARY_CLASS_OPS(N) \
template<> SoftFloatWrapper<N> operator-(const SoftFloatWrapper<N>& f); \
template<> SoftFloatWrapper<N> operator+(const SoftFloatWrapper<N>& f);
STREFLOP_SOFT_WRAPPER_MAKE_REAL_UNARY_CLASS_OPS(32)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_UNARY_CLASS_OPS(64)
STREFLOP_SOFT_WRAPPER_MAKE_REAL_UNARY_CLASS_OPS(96)
#endif
#endif

View File

@ -0,0 +1,105 @@
/*============================================================================
PROMINENT NOTICE: THIS IS A DERIVATIVE WORK OF THE ORIGINAL SOFTFLOAT CODE
CHANGES:
Removed processors include
This file serves as a bridge to the streflop system
Nicolas Brodu, 2006
=============================================================================*/
/*============================================================================
This C header file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic
Package, Release 2b.
Written by John R. Hauser. This work was made possible in part by the
International Computer Science Institute, located at Suite 600, 1947 Center
Street, Berkeley, California 94704. Funding was partially provided by the
National Science Foundation under grant MIP-9311980. The original version
of this code was written as part of a project to build a fixed-point vector
processor in collaboration with the University of California at Berkeley,
overseen by Profs. Nelson Morgan and John Wawrzynek. More information
is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
arithmetic/SoftFloat.html'.
THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
Derivative works are acceptable, even for commercial purposes, so long as
(1) the source code for the derivative work includes prominent notice that
the work is derivative, and (2) the source code includes prominent notice with
these four paragraphs for those parts of this code that are retained.
=============================================================================*/
/*----------------------------------------------------------------------------
| Include common integer types and flags.
*----------------------------------------------------------------------------*/
#ifdef STREFLOP_SOFT
#include "../System.h"
namespace streflop {
namespace SoftFloat {
// Use the types from System.h, some could be more "convenient"
typedef int8_t flag;
typedef uint8_t uint8;
typedef int8_t int8;
typedef uint16_t uint16;
typedef int16_t int16;
typedef uint32_t uint32;
typedef int32_t int32;
typedef uint64_t uint64;
typedef int64_t int64;
// And these are exact by construction
typedef uint8_t bits8;
typedef int8_t sbits8;
typedef uint16_t bits16;
typedef int16_t sbits16;
typedef uint32_t bits32;
typedef int32_t sbits32;
typedef uint64_t bits64;
typedef int64_t sbits64;
// softfloat needs boolean TRUE/FALSE; #undef first in case a platform
// header already defines them as macros.
#undef TRUE
#undef FALSE
enum {
FALSE = 0,
TRUE = 1
};
// Streflop bridge: supply the endianness defines that the original
// SoftFloat per-processor headers provided.
// NOTE(review): if __FLOAT_WORD_ORDER is undefined, both tests evaluate
// false and neither LITTLEENDIAN nor BIGENDIAN gets defined - presumably
// System.h guarantees the macro; confirm.
#if __FLOAT_WORD_ORDER == 1234
#ifndef LITTLEENDIAN
#define LITTLEENDIAN
#endif
#elif __FLOAT_WORD_ORDER == 4321
#ifndef BIGENDIAN
#define BIGENDIAN
#endif
#endif
// 64-bit int types are assumed to exist in other parts of streflop
#define BITS64
// How to define a long long 64-bit constant
#define LIT64( a ) a##LL
// From original comment: If a compiler does not support explicit inlining,
// this macro should be defined to be 'static'.
// However, with C++, this has become obsolete
#define INLINE extern inline
}
}
#endif

View File

@ -0,0 +1,732 @@
/*============================================================================
PROMINENT NOTICE: THIS IS A DERIVATIVE WORK OF THE ORIGINAL SOFTFLOAT CODE
CHANGES:
Inserted this file in a namespace
Nicolas Brodu, 2006
=============================================================================*/
namespace streflop {
namespace SoftFloat {
/*============================================================================
This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
Arithmetic Package, Release 2b.
Written by John R. Hauser. This work was made possible in part by the
International Computer Science Institute, located at Suite 600, 1947 Center
Street, Berkeley, California 94704. Funding was partially provided by the
National Science Foundation under grant MIP-9311980. The original version
of this code was written as part of a project to build a fixed-point vector
processor in collaboration with the University of California at Berkeley,
overseen by Profs. Nelson Morgan and John Wawrzynek. More information
is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
arithmetic/SoftFloat.html'.
THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
INSTITUTE (possibly via similar legal notice) AGAINST ALL LOSSES, COSTS, OR
OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
Derivative works are acceptable, even for commercial purposes, so long as
(1) the source code for the derivative work includes prominent notice that
the work is derivative, and (2) the source code includes prominent notice with
these four paragraphs for those parts of this code that are retained.
=============================================================================*/
/*----------------------------------------------------------------------------
| Shifts `a' right by the number of bits given in `count'. If any nonzero
| bits are shifted off, they are ``jammed'' into the least significant bit of
| the result by setting the least significant bit to 1. The value of `count'
| can be arbitrarily large; in particular, if `count' is greater than 32, the
| result will be either 0 or 1, depending on whether `a' is zero or nonzero.
| The result is stored in the location pointed to by `zPtr'.
*----------------------------------------------------------------------------*/
INLINE void shift32RightJamming( bits32 a, int16 count, bits32 *zPtr )
{
bits32 z;
if ( count == 0 ) {
z = a;
}
else if ( count < 32 ) {
// Left-shifting by (32 - count), written as (-count) & 31 to keep the
// shift amount in range, isolates the discarded bits; any nonzero one
// is "jammed" (ORed) into the least significant bit of the result.
z = ( a>>count ) | ( ( a<<( ( - count ) & 31 ) ) != 0 );
}
else {
// Everything is shifted out: result is 1 iff a had any bit set.
z = ( a != 0 );
}
*zPtr = z;
}
/*----------------------------------------------------------------------------
| Shifts `a' right by the number of bits given in `count'. If any nonzero
| bits are shifted off, they are ``jammed'' into the least significant bit of
| the result by setting the least significant bit to 1. The value of `count'
| can be arbitrarily large; in particular, if `count' is greater than 64, the
| result will be either 0 or 1, depending on whether `a' is zero or nonzero.
| The result is stored in the location pointed to by `zPtr'.
*----------------------------------------------------------------------------*/
INLINE void shift64RightJamming( bits64 a, int16 count, bits64 *zPtr )
{
bits64 z;
if ( count == 0 ) {
z = a;
}
else if ( count < 64 ) {
// (-count) & 63 == 64 - count here; the left shift exposes the bits
// being discarded so they can be jammed into the LSB of the result.
z = ( a>>count ) | ( ( a<<( ( - count ) & 63 ) ) != 0 );
}
else {
// Everything is shifted out: result is the sticky bit only.
z = ( a != 0 );
}
*zPtr = z;
}
/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by 64
| _plus_ the number of bits given in `count'. The shifted result is at most
| 64 nonzero bits; this is stored at the location pointed to by `z0Ptr'. The
| bits shifted off form a second 64-bit result as follows: The _last_ bit
| shifted off is the most-significant bit of the extra result, and the other
| 63 bits of the extra result are all zero if and only if _all_but_the_last_
| bits shifted off were all zero. This extra result is stored in the location
| pointed to by `z1Ptr'. The value of `count' can be arbitrarily large.
| (This routine makes more sense if `a0' and `a1' are considered to form
| a fixed-point value with binary point between `a0' and `a1'. This fixed-
| point value is shifted right by the number of bits given in `count', and
| the integer part of the result is returned at the location pointed to by
| `z0Ptr'. The fractional part of the result may be slightly corrupted as
| described above, and is returned at the location pointed to by `z1Ptr'.)
*----------------------------------------------------------------------------*/
INLINE void
shift64ExtraRightJamming(
bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr )
{
bits64 z0, z1;
int8 negCount = ( - count ) & 63;
if ( count == 0 ) {
z1 = a1;
z0 = a0;
}
else if ( count < 64 ) {
// Bits leaving a0 become the high part of the extra word; (a1 != 0)
// jams the sticky information from the old extra word into the LSB.
z1 = ( a0<<negCount ) | ( a1 != 0 );
z0 = a0>>count;
}
else {
if ( count == 64 ) {
// Integer part shifts out exactly: a0 becomes the extra word,
// a1 collapses to a sticky bit.
z1 = a0 | ( a1 != 0 );
}
else {
// count > 64: everything collapses into a single sticky bit.
z1 = ( ( a0 | a1 ) != 0 );
}
z0 = 0;
}
*z1Ptr = z1;
*z0Ptr = z0;
}
/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the
| number of bits given in `count'. Any bits shifted off are lost. The value
| of `count' can be arbitrarily large; in particular, if `count' is greater
| than 128, the result will be 0. The result is broken into two 64-bit pieces
| which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
shift128Right(
bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr )
{
bits64 z0, z1;
int8 negCount = ( - count ) & 63;
if ( count == 0 ) {
// No shift: pass both halves through unchanged.
z1 = a1;
z0 = a0;
}
else if ( count < 64 ) {
// Intra-word shift: the low word receives the bits crossing over
// from a0 (negCount == 64 - count here).
z1 = ( a0<<negCount ) | ( a1>>count );
z0 = a0>>count;
}
else {
// count >= 64: a1 is discarded entirely and a0 supplies the low word.
// BUGFIX: this previously tested `count < 64`, which is always false
// in this branch, so shift counts in [64,127] wrongly produced 0.
// Per the contract above, only counts >= 128 yield an all-zero result
// (count & 63 == count - 64 for counts in [64,127]).
z1 = ( count < 128 ) ? ( a0>>( count & 63 ) ) : 0;
z0 = 0;
}
*z1Ptr = z1;
*z0Ptr = z0;
}
/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' right by the
| number of bits given in `count'. If any nonzero bits are shifted off, they
| are ``jammed'' into the least significant bit of the result by setting the
| least significant bit to 1. The value of `count' can be arbitrarily large;
| in particular, if `count' is greater than 128, the result will be either
| 0 or 1, depending on whether the concatenation of `a0' and `a1' is zero or
| nonzero. The result is broken into two 64-bit pieces which are stored at
| the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
shift128RightJamming(
bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr )
{
bits64 z0, z1;
int8 negCount = ( - count ) & 63;
if ( count == 0 ) {
z1 = a1;
z0 = a0;
}
else if ( count < 64 ) {
// Low word: bits crossing over from a0, the surviving bits of a1, plus
// a sticky bit if any of a1's discarded bits were nonzero.
z1 = ( a0<<negCount ) | ( a1>>count ) | ( ( a1<<negCount ) != 0 );
z0 = a0>>count;
}
else {
if ( count == 64 ) {
// Exactly one word: low word is a0 with a1 collapsed to a sticky bit.
z1 = a0 | ( a1 != 0 );
}
else if ( count < 128 ) {
// a1 is gone entirely; jam its bits and a0's discarded bits.
z1 = ( a0>>( count & 63 ) ) | ( ( ( a0<<negCount ) | a1 ) != 0 );
}
else {
// Everything shifted out: result is the sticky bit only.
z1 = ( ( a0 | a1 ) != 0 );
}
z0 = 0;
}
*z1Ptr = z1;
*z0Ptr = z0;
}
/*----------------------------------------------------------------------------
| Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' right
| by 64 _plus_ the number of bits given in `count'. The shifted result is
| at most 128 nonzero bits; these are broken into two 64-bit pieces which are
| stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted
| off form a third 64-bit result as follows: The _last_ bit shifted off is
| the most-significant bit of the extra result, and the other 63 bits of the
| extra result are all zero if and only if _all_but_the_last_ bits shifted off
| were all zero. This extra result is stored in the location pointed to by
| `z2Ptr'. The value of `count' can be arbitrarily large.
| (This routine makes more sense if `a0', `a1', and `a2' are considered
| to form a fixed-point value with binary point between `a1' and `a2'. This
| fixed-point value is shifted right by the number of bits given in `count',
| and the integer part of the result is returned at the locations pointed to
| by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly
| corrupted as described above, and is returned at the location pointed to by
| `z2Ptr'.)
*----------------------------------------------------------------------------*/
INLINE void
shift128ExtraRightJamming(
bits64 a0,
bits64 a1,
bits64 a2,
int16 count,
bits64 *z0Ptr,
bits64 *z1Ptr,
bits64 *z2Ptr
)
{
bits64 z0, z1, z2;
int8 negCount = ( - count ) & 63;
if ( count == 0 ) {
z2 = a2;
z1 = a1;
z0 = a0;
}
else {
if ( count < 64 ) {
// Intra-word shift: each word receives the bits crossing over
// from the word above it.
z2 = a1<<negCount;
z1 = ( a0<<negCount ) | ( a1>>count );
z0 = a0>>count;
}
else {
if ( count == 64 ) {
z2 = a1;
z1 = a0;
}
else {
// count > 64: all of a1 falls below the binary point; fold it
// into the sticky accumulator before computing the extra word.
a2 |= a1;
if ( count < 128 ) {
z2 = a0<<negCount;
z1 = a0>>( count & 63 );
}
else {
z2 = ( count == 128 ) ? a0 : ( a0 != 0 );
z1 = 0;
}
}
z0 = 0;
}
// Jam: any nonzero bits shifted off the extra word set its LSB.
z2 |= ( a2 != 0 );
}
*z2Ptr = z2;
*z1Ptr = z1;
*z0Ptr = z0;
}
/*----------------------------------------------------------------------------
| Shifts the 128-bit value formed by concatenating `a0' and `a1' left by the
| number of bits given in `count'. Any bits shifted off are lost. The value
| of `count' must be less than 64. The result is broken into two 64-bit
| pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
shortShift128Left(
bits64 a0, bits64 a1, int16 count, bits64 *z0Ptr, bits64 *z1Ptr )
{
*z1Ptr = a1<<count;
// count == 0 is special-cased because a1 >> ((-0) & 63) == a1 >> 0 would
// wrongly OR all of a1 into the high word.
*z0Ptr =
( count == 0 ) ? a0 : ( a0<<count ) | ( a1>>( ( - count ) & 63 ) );
}
/*----------------------------------------------------------------------------
| Shifts the 192-bit value formed by concatenating `a0', `a1', and `a2' left
| by the number of bits given in `count'. Any bits shifted off are lost.
| The value of `count' must be less than 64. The result is broken into three
| 64-bit pieces which are stored at the locations pointed to by `z0Ptr',
| `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
shortShift192Left(
bits64 a0,
bits64 a1,
bits64 a2,
int16 count,
bits64 *z0Ptr,
bits64 *z1Ptr,
bits64 *z2Ptr
)
{
bits64 z0, z1, z2;
int8 negCount;
z2 = a2<<count;
z1 = a1<<count;
z0 = a0<<count;
// Cross-word carries only exist for 0 < count (< 64 per the contract);
// the guard also avoids shifting right by 64 when count == 0.
if ( 0 < count ) {
negCount = ( ( - count ) & 63 );
z1 |= a2>>negCount;
z0 |= a1>>negCount;
}
*z2Ptr = z2;
*z1Ptr = z1;
*z0Ptr = z0;
}
/*----------------------------------------------------------------------------
| Adds the 128-bit value formed by concatenating `a0' and `a1' to the 128-bit
| value formed by concatenating `b0' and `b1'. Addition is modulo 2^128, so
| any carry out is lost. The result is broken into two 64-bit pieces which
| are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
 add128(
     bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
{
    // Add the low halves first; an unsigned wrap (sum < addend) signals
    // a carry into the high half.  Any carry out of the top is lost.
    bits64 lowSum = a1 + b1;
    bits64 carry = ( lowSum < a1 );
    *z1Ptr = lowSum;
    *z0Ptr = a0 + b0 + carry;
}
/*----------------------------------------------------------------------------
| Adds the 192-bit value formed by concatenating `a0', `a1', and `a2' to the
| 192-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
| modulo 2^192, so any carry out is lost. The result is broken into three
| 64-bit pieces which are stored at the locations pointed to by `z0Ptr',
| `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
 add192(
     bits64 a0,
     bits64 a1,
     bits64 a2,
     bits64 b0,
     bits64 b1,
     bits64 b2,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2;
    int8 carry0, carry1;
    // Add the limbs bottom-up; unsigned wraparound (sum < addend) marks a
    // carry into the next-higher limb.
    z2 = a2 + b2;
    carry1 = ( z2 < a2 );
    z1 = a1 + b1;
    carry0 = ( z1 < a1 );
    z0 = a0 + b0;
    // Folding carry1 into z1 can itself overflow; that secondary carry is
    // detected by ( z1 < carry1 ) and propagated into z0.
    z1 += carry1;
    z0 += ( z1 < carry1 );
    z0 += carry0;
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;
}
/*----------------------------------------------------------------------------
| Subtracts the 128-bit value formed by concatenating `b0' and `b1' from the
| 128-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo
| 2^128, so any borrow out (carry out) is lost. The result is broken into two
| 64-bit pieces which are stored at the locations pointed to by `z0Ptr' and
| `z1Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
 sub128(
     bits64 a0, bits64 a1, bits64 b0, bits64 b1, bits64 *z0Ptr, bits64 *z1Ptr )
{
    // A borrow out of the low half occurs exactly when a1 < b1; any
    // borrow out of the top is lost (subtraction is modulo 2^128).
    bits64 borrow = ( a1 < b1 );
    *z1Ptr = a1 - b1;
    *z0Ptr = a0 - b0 - borrow;
}
/*----------------------------------------------------------------------------
| Subtracts the 192-bit value formed by concatenating `b0', `b1', and `b2'
| from the 192-bit value formed by concatenating `a0', `a1', and `a2'.
| Subtraction is modulo 2^192, so any borrow out (carry out) is lost. The
| result is broken into three 64-bit pieces which are stored at the locations
| pointed to by `z0Ptr', `z1Ptr', and `z2Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
 sub192(
     bits64 a0,
     bits64 a1,
     bits64 a2,
     bits64 b0,
     bits64 b1,
     bits64 b2,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    bits64 z0, z1, z2;
    int8 borrow0, borrow1;
    // Subtract the limbs bottom-up; a borrow occurs when the subtrahend
    // limb exceeds the minuend limb.
    z2 = a2 - b2;
    borrow1 = ( a2 < b2 );
    z1 = a1 - b1;
    borrow0 = ( a1 < b1 );
    z0 = a0 - b0;
    // Propagating borrow1 out of z1 may itself borrow; that is detected
    // via ( z1 < borrow1 ) BEFORE borrow1 is subtracted from z1.
    z0 -= ( z1 < borrow1 );
    z1 -= borrow1;
    z0 -= borrow0;
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;
}
/*----------------------------------------------------------------------------
| Multiplies `a' by `b' to obtain a 128-bit product. The product is broken
| into two 64-bit pieces which are stored at the locations pointed to by
| `z0Ptr' and `z1Ptr'.
*----------------------------------------------------------------------------*/
INLINE void mul64To128( bits64 a, bits64 b, bits64 *z0Ptr, bits64 *z1Ptr )
{
    bits32 aHigh, aLow, bHigh, bLow;
    bits64 z0, zMiddleA, zMiddleB, z1;
    // Split each operand into 32-bit halves and form the four partial
    // products of a schoolbook multiplication.
    aLow = a;
    aHigh = a>>32;
    bLow = b;
    bHigh = b>>32;
    z1 = ( (bits64) aLow ) * bLow;
    zMiddleA = ( (bits64) aLow ) * bHigh;
    zMiddleB = ( (bits64) aHigh ) * bLow;
    z0 = ( (bits64) aHigh ) * bHigh;
    // Combine the two middle products; overflow here is worth 2^64, i.e.
    // a 2^32 carry into z0 once the middle sum is scaled into position.
    zMiddleA += zMiddleB;
    z0 += ( ( (bits64) ( zMiddleA < zMiddleB ) )<<32 ) + ( zMiddleA>>32 );
    zMiddleA <<= 32;
    z1 += zMiddleA;
    // Carry from the low half of the result into the high half.
    z0 += ( z1 < zMiddleA );
    *z1Ptr = z1;
    *z0Ptr = z0;
}
/*----------------------------------------------------------------------------
| Multiplies the 128-bit value formed by concatenating `a0' and `a1' by
| `b' to obtain a 192-bit product. The product is broken into three 64-bit
| pieces which are stored at the locations pointed to by `z0Ptr', `z1Ptr', and
| `z2Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
 mul128By64To192(
     bits64 a0,
     bits64 a1,
     bits64 b,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr
 )
{
    // (a0:a1) * b = (a1*b) + ((a0*b) << 64), accumulated limb by limb.
    bits64 highProd, midProd, midLow, lowProd;
    mul64To128( a1, b, &midLow, &lowProd );
    mul64To128( a0, b, &highProd, &midProd );
    add128( highProd, midProd, 0, midLow, &highProd, &midProd );
    *z0Ptr = highProd;
    *z1Ptr = midProd;
    *z2Ptr = lowProd;
}
/*----------------------------------------------------------------------------
| Multiplies the 128-bit value formed by concatenating `a0' and `a1' to the
| 128-bit value formed by concatenating `b0' and `b1' to obtain a 256-bit
| product. The product is broken into four 64-bit pieces which are stored at
| the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
*----------------------------------------------------------------------------*/
INLINE void
 mul128To256(
     bits64 a0,
     bits64 a1,
     bits64 b0,
     bits64 b1,
     bits64 *z0Ptr,
     bits64 *z1Ptr,
     bits64 *z2Ptr,
     bits64 *z3Ptr
 )
{
    bits64 z0, z1, z2, z3;
    bits64 more1, more2;
    // Schoolbook product of two 128-bit values built from four
    // 64x64->128 partial products, with explicit carry propagation.
    mul64To128( a1, b1, &z2, &z3 );       // low(a)  * low(b)
    mul64To128( a1, b0, &z1, &more2 );    // low(a)  * high(b)
    add128( z1, more2, 0, z2, &z1, &z2 );
    mul64To128( a0, b0, &z0, &more1 );    // high(a) * high(b)
    add128( z0, more1, 0, z1, &z0, &z1 );
    mul64To128( a0, b1, &more1, &more2 ); // high(a) * low(b)
    add128( more1, more2, 0, z2, &more1, &z2 );
    add128( z0, z1, 0, more1, &z0, &z1 );
    *z3Ptr = z3;
    *z2Ptr = z2;
    *z1Ptr = z1;
    *z0Ptr = z0;
}
/*----------------------------------------------------------------------------
| Returns an approximation to the 64-bit integer quotient obtained by dividing
| `b' into the 128-bit value formed by concatenating `a0' and `a1'. The
| divisor `b' must be at least 2^63. If q is the exact quotient truncated
| toward zero, the approximation returned lies between q and q + 2 inclusive.
| If the exact quotient q is larger than 64 bits, the maximum positive 64-bit
| unsigned integer is returned.
*----------------------------------------------------------------------------*/
static bits64 estimateDiv128To64( bits64 a0, bits64 a1, bits64 b )
{
    bits64 b0, b1;
    bits64 rem0, rem1, term0, term1;
    bits64 z;
    // Quotient would not fit in 64 bits: saturate to all-ones.
    if ( b <= a0 ) return LIT64( 0xFFFFFFFFFFFFFFFF );
    b0 = b>>32;
    // First 32-bit quotient digit, estimated from the top words.
    z = ( b0<<32 <= a0 ) ? LIT64( 0xFFFFFFFF00000000 ) : ( a0 / b0 )<<32;
    mul64To128( b, z, &term0, &term1 );
    sub128( a0, a1, term0, term1, &rem0, &rem1 );
    // The estimate can be too large; step it down by 2^32 until the
    // partial remainder becomes non-negative.
    while ( ( (sbits64) rem0 ) < 0 ) {
        z -= LIT64( 0x100000000 );
        b1 = b<<32;
        add128( rem0, rem1, b0, b1, &rem0, &rem1 );
    }
    rem0 = ( rem0<<32 ) | ( rem1>>32 );
    // Second quotient digit from the updated remainder.
    z |= ( b0<<32 <= rem0 ) ? 0xFFFFFFFF : rem0 / b0;
    return z;
}
/*----------------------------------------------------------------------------
| Returns an approximation to the square root of the 32-bit significand given
| by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
| `aExp' (the least significant bit) is 1, the integer returned approximates
| 2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp'
| is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either
| case, the approximation returned lies strictly within +/-2 of the exact
| value.
*----------------------------------------------------------------------------*/
static bits32 estimateSqrt32( int16 aExp, bits32 a )
{
    // Piecewise-linear seed tables indexed by the top fraction bits,
    // one table per exponent parity.
    static const bits16 sqrtOddAdjustments[] = {
        0x0004, 0x0022, 0x005D, 0x00B1, 0x011D, 0x019F, 0x0236, 0x02E0,
        0x039C, 0x0468, 0x0545, 0x0631, 0x072B, 0x0832, 0x0946, 0x0A67
    };
    static const bits16 sqrtEvenAdjustments[] = {
        0x0A2D, 0x08AF, 0x075A, 0x0629, 0x051A, 0x0429, 0x0356, 0x029E,
        0x0200, 0x0179, 0x0109, 0x00AF, 0x0068, 0x0034, 0x0012, 0x0002
    };
    int8 index;
    bits32 z;
    index = ( a>>27 ) & 15;
    if ( aExp & 1 ) {
        // Odd exponent: seed approximates 2^31*sqrt(a/2^31).
        z = 0x4000 + ( a>>17 ) - sqrtOddAdjustments[ index ];
        z = ( ( a / z )<<14 ) + ( z<<15 );
        a >>= 1;
    }
    else {
        // Even exponent: seed approximates 2^31*sqrt(a/2^30).
        z = 0x8000 + ( a>>17 ) - sqrtEvenAdjustments[ index ];
        z = a / z + z;
        z = ( 0x20000 <= z ) ? 0xFFFF8000 : ( z<<15 );
        if ( z <= a ) return (bits32) ( ( (sbits32) a )>>1 );
    }
    // One Newton-Raphson style refinement of the seed.
    return ( (bits32) ( ( ( (bits64) a )<<31 ) / z ) ) + ( z>>1 );
}
/*----------------------------------------------------------------------------
| Returns the number of leading 0 bits before the most-significant 1 bit of
| `a'. If `a' is zero, 32 is returned.
*----------------------------------------------------------------------------*/
static int8 countLeadingZeros32( bits32 a )
{
    // Binary-search formulation: test progressively narrower top fields
    // and shift the value up as empty fields are counted.  Produces the
    // same result as a table lookup: 32 for zero, otherwise the number of
    // zero bits above the most-significant 1.
    int8 shiftCount = 0;
    if ( a == 0 ) return 32;
    if ( ( a & 0xFFFF0000 ) == 0 ) { shiftCount += 16; a <<= 16; }
    if ( ( a & 0xFF000000 ) == 0 ) { shiftCount += 8;  a <<= 8;  }
    if ( ( a & 0xF0000000 ) == 0 ) { shiftCount += 4;  a <<= 4;  }
    if ( ( a & 0xC0000000 ) == 0 ) { shiftCount += 2;  a <<= 2;  }
    if ( ( a & 0x80000000 ) == 0 ) { shiftCount += 1; }
    return shiftCount;
}
/*----------------------------------------------------------------------------
| Returns the number of leading 0 bits before the most-significant 1 bit of
| `a'. If `a' is zero, 64 is returned.
*----------------------------------------------------------------------------*/
static int8 countLeadingZeros64( bits64 a )
{
    // Reduce to the 32-bit case: if the top word is empty, count its 32
    // zeros and examine the bottom word; otherwise examine the top word.
    if ( a < ( (bits64) 1 )<<32 ) {
        return 32 + countLeadingZeros32( a );
    }
    return countLeadingZeros32( a>>32 );
}
/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1'
| is equal to the 128-bit value formed by concatenating `b0' and `b1'.
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/
INLINE flag eq128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 )
{
    // 128-bit equality: both halves must match.
    if ( a0 != b0 ) return 0;
    return a1 == b1;
}
/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less
| than or equal to the 128-bit value formed by concatenating `b0' and `b1'.
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/
INLINE flag le128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 )
{
    // Compare high halves first; ties are decided by the low halves.
    if ( a0 != b0 ) return a0 < b0;
    return a1 <= b1;
}
/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is less
| than the 128-bit value formed by concatenating `b0' and `b1'. Otherwise,
| returns 0.
*----------------------------------------------------------------------------*/
INLINE flag lt128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 )
{
    // Compare high halves first; ties are decided by the low halves.
    if ( a0 != b0 ) return a0 < b0;
    return a1 < b1;
}
/*----------------------------------------------------------------------------
| Returns 1 if the 128-bit value formed by concatenating `a0' and `a1' is
| not equal to the 128-bit value formed by concatenating `b0' and `b1'.
| Otherwise, returns 0.
*----------------------------------------------------------------------------*/
INLINE flag ne128( bits64 a0, bits64 a1, bits64 b0, bits64 b1 )
{
    // 128-bit inequality: true when either half differs.
    return ! ( ( a0 == b0 ) && ( a1 == b1 ) );
}
// Close namespaces
}
}

View File

@ -0,0 +1,517 @@
/*============================================================================
PROMINENT NOTICE: THIS IS A DERIVATIVE WORK OF THE ORIGINAL SOFTFLOAT CODE
CHANGES:
This derived work raises REAL system traps, controlled by the value
of a global variable.
Streflop defines the flags controlling traps.
The following files are now included too
#include <unistd.h>
#include <signal.h>
Nicolas Brodu, 2006
=============================================================================*/
#include <unistd.h>
#include <signal.h>
#include "../streflop.h"
namespace streflop {
namespace SoftFloat {
// Here is the variable that controls sending real traps.
// Initalized to 0, see FPUSettings.h to check this masks all exceptions
int float_exception_realtraps = 0;
/*============================================================================
This C source fragment is part of the SoftFloat IEC/IEEE Floating-point
Arithmetic Package, Release 2b.
Written by John R. Hauser. This work was made possible in part by the
International Computer Science Institute, located at Suite 600, 1947 Center
Street, Berkeley, California 94704. Funding was partially provided by the
National Science Foundation under grant MIP-9311980. The original version
of this code was written as part of a project to build a fixed-point vector
processor in collaboration with the University of California at Berkeley,
overseen by Profs. Nelson Morgan and John Wawrzynek. More information
is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
arithmetic/SoftFloat.html'.
THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
Derivative works are acceptable, even for commercial purposes, so long as
(1) the source code for the derivative work includes prominent notice that
the work is derivative, and (2) the source code includes prominent notice with
these four paragraphs for those parts of this code that are retained.
=============================================================================*/
/*----------------------------------------------------------------------------
| Underflow tininess-detection mode, statically initialized to default value.
| (The declaration in `softfloat.h' must match the `int8' type here.)
*----------------------------------------------------------------------------*/
int8 float_detect_tininess = float_tininess_after_rounding;
/*----------------------------------------------------------------------------
| Raises the exceptions specified by `flags'. Floating-point traps can be
| defined here if desired. It is currently not possible for such a trap
| to substitute a result value. If traps are not implemented, this routine
| should be simply `float_exception_flags |= flags;'.
*----------------------------------------------------------------------------*/
void float_raise( int8 flags )
{
    // Record the newly raised exceptions in the sticky flag byte.
    float_exception_flags |= flags;
    /* NB060423: Modifications to send real traps
    Conversion needed between softfloat system and x87 system to check for matches
    */
    // A trap fires when a raised SoftFloat flag matches an enabled
    // streflop FE_* trap bit.  NOTE: the bit tests must be fully
    // parenthesized -- `&' binds more loosely than `!=', so the previous
    // `flags & float_flag_X != 0' parsed as `flags & (X != 0)', i.e. it
    // only ever tested bit 0 of `flags' regardless of which flag was meant.
    int trap = 0;
    if (((flags & float_flag_invalid) != 0) && ((float_exception_realtraps & FE_INVALID) != 0)) {
        trap = 1;
    }
    if (((flags & float_flag_divbyzero) != 0) && ((float_exception_realtraps & FE_DIVBYZERO) != 0)) {
        trap = 1;
    }
    if (((flags & float_flag_overflow) != 0) && ((float_exception_realtraps & FE_OVERFLOW) != 0)) {
        trap = 1;
    }
    if (((flags & float_flag_underflow) != 0) && ((float_exception_realtraps & FE_UNDERFLOW) != 0)) {
        trap = 1;
    }
    if (((flags & float_flag_inexact) != 0) && ((float_exception_realtraps & FE_INEXACT) != 0)) {
        trap = 1;
    }
    // Send SIGFPE signal to current process
    if (trap==1) {
        kill(getpid(), SIGFPE);
    }
}
/*----------------------------------------------------------------------------
| Internal canonical NaN format.
*----------------------------------------------------------------------------*/
// Internal canonical NaN: sign plus the fraction bits left-aligned
// across a 128-bit (high:low) field, shared by all conversion helpers.
typedef struct {
    flag sign;         // sign bit of the NaN
    bits64 high, low;  // fraction bits, left-aligned across 128 bits
} commonNaNT;
/*----------------------------------------------------------------------------
| The pattern for a default generated single-precision NaN.
*----------------------------------------------------------------------------*/
#define float32_default_nan 0xFFC00000
/*----------------------------------------------------------------------------
| Returns 1 if the single-precision floating-point value `a' is a NaN;
| otherwise returns 0.
*----------------------------------------------------------------------------*/
flag float32_is_nan( float32 a )
{
    // Shift out the sign; a NaN leaves exponent all-ones plus a nonzero
    // fraction, i.e. a value strictly above 0xFF000000.
    bits32 shifted = (bits32) ( a<<1 );
    return 0xFF000000 < shifted;
}
/*----------------------------------------------------------------------------
| Returns 1 if the single-precision floating-point value `a' is a signaling
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
flag float32_is_signaling_nan( float32 a )
{
    // Signaling NaN: exponent all ones, quiet bit (bit 22) clear, and a
    // nonzero fraction in the remaining bits.
    flag expAndQuiet = ( ( ( a>>22 ) & 0x1FF ) == 0x1FE );
    flag fracNonzero = ( a & 0x003FFFFF ) != 0;
    return expAndQuiet && fracNonzero;
}
/*----------------------------------------------------------------------------
| Returns the result of converting the single-precision floating-point NaN
| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid
| exception is raised.
*----------------------------------------------------------------------------*/
static commonNaNT float32ToCommonNaN( float32 a )
{
    // A signaling NaN raises the invalid exception on conversion.
    commonNaNT result;
    if ( float32_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
    result.low = 0;
    // Left-align the 23 fraction bits into the canonical 128-bit field.
    result.high = ( (bits64) a )<<41;
    result.sign = a>>31;
    return result;
}
/*----------------------------------------------------------------------------
| Returns the result of converting the canonical NaN `a' to the single-
| precision floating-point format.
*----------------------------------------------------------------------------*/
static float32 commonNaNToFloat32( commonNaNT a )
{
    // Rebuild: sign bit, quiet-NaN exponent/quiet-bit pattern, then the
    // top fraction bits of the canonical NaN.
    float32 signBit = ( (bits32) a.sign )<<31;
    return signBit | 0x7FC00000 | ( a.high>>41 );
}
/*----------------------------------------------------------------------------
| Takes two single-precision floating-point values `a' and `b', one of which
| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a
| signaling NaN, the invalid exception is raised.
*----------------------------------------------------------------------------*/
static float32 propagateFloat32NaN( float32 a, float32 b )
{
    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
    aIsNaN = float32_is_nan( a );
    aIsSignalingNaN = float32_is_signaling_nan( a );
    bIsNaN = float32_is_nan( b );
    bIsSignalingNaN = float32_is_signaling_nan( b );
    // Quiet both inputs by setting the quiet bit (bit 22).
    a |= 0x00400000;
    b |= 0x00400000;
    if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
    if ( aIsSignalingNaN ) {
        if ( bIsSignalingNaN ) goto returnLargerSignificand;
        // Prefer a quiet NaN over a (now-quieted) signaling one.
        return bIsNaN ? b : a;
    }
    else if ( aIsNaN ) {
        if ( bIsSignalingNaN | ! bIsNaN ) return a;
 returnLargerSignificand:
        // Both NaNs of equal rank: deterministically pick the larger
        // significand (sign shifted out), ties by smaller bit pattern.
        if ( (bits32) ( a<<1 ) < (bits32) ( b<<1 ) ) return b;
        if ( (bits32) ( b<<1 ) < (bits32) ( a<<1 ) ) return a;
        return ( a < b ) ? a : b;
    }
    else {
        return b;
    }
}
/*----------------------------------------------------------------------------
| The pattern for a default generated double-precision NaN.
*----------------------------------------------------------------------------*/
#define float64_default_nan LIT64( 0xFFF8000000000000 )
/*----------------------------------------------------------------------------
| Returns 1 if the double-precision floating-point value `a' is a NaN;
| otherwise returns 0.
*----------------------------------------------------------------------------*/
flag float64_is_nan( float64 a )
{
    // Shift out the sign; a NaN leaves exponent all-ones plus a nonzero
    // fraction behind.
    bits64 shifted = (bits64) ( a<<1 );
    return LIT64( 0xFFE0000000000000 ) < shifted;
}
/*----------------------------------------------------------------------------
| Returns 1 if the double-precision floating-point value `a' is a signaling
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
flag float64_is_signaling_nan( float64 a )
{
    // Signaling NaN: exponent all ones, quiet bit (bit 51) clear, and a
    // nonzero fraction in the remaining bits.
    flag expAndQuiet = ( ( ( a>>51 ) & 0xFFF ) == 0xFFE );
    flag fracNonzero = ( a & LIT64( 0x0007FFFFFFFFFFFF ) ) != 0;
    return expAndQuiet && fracNonzero;
}
/*----------------------------------------------------------------------------
| Returns the result of converting the double-precision floating-point NaN
| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid
| exception is raised.
*----------------------------------------------------------------------------*/
static commonNaNT float64ToCommonNaN( float64 a )
{
    // A signaling NaN raises the invalid exception on conversion.
    commonNaNT result;
    if ( float64_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
    result.low = 0;
    // Left-align the 52 fraction bits into the canonical field.
    result.high = a<<12;
    result.sign = a>>63;
    return result;
}
/*----------------------------------------------------------------------------
| Returns the result of converting the canonical NaN `a' to the double-
| precision floating-point format.
*----------------------------------------------------------------------------*/
static float64 commonNaNToFloat64( commonNaNT a )
{
    // Rebuild: sign bit, quiet-NaN exponent/quiet-bit pattern, then the
    // top fraction bits of the canonical NaN.
    float64 signBit = ( (bits64) a.sign )<<63;
    return signBit | LIT64( 0x7FF8000000000000 ) | ( a.high>>12 );
}
/*----------------------------------------------------------------------------
| Takes two double-precision floating-point values `a' and `b', one of which
| is a NaN, and returns the appropriate NaN result. If either `a' or `b' is a
| signaling NaN, the invalid exception is raised.
*----------------------------------------------------------------------------*/
static float64 propagateFloat64NaN( float64 a, float64 b )
{
    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
    aIsNaN = float64_is_nan( a );
    aIsSignalingNaN = float64_is_signaling_nan( a );
    bIsNaN = float64_is_nan( b );
    bIsSignalingNaN = float64_is_signaling_nan( b );
    // Quiet both inputs by setting the quiet bit (bit 51).
    a |= LIT64( 0x0008000000000000 );
    b |= LIT64( 0x0008000000000000 );
    if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
    if ( aIsSignalingNaN ) {
        if ( bIsSignalingNaN ) goto returnLargerSignificand;
        // Prefer a quiet NaN over a (now-quieted) signaling one.
        return bIsNaN ? b : a;
    }
    else if ( aIsNaN ) {
        if ( bIsSignalingNaN | ! bIsNaN ) return a;
 returnLargerSignificand:
        // Deterministic choice: larger significand wins (sign shifted
        // out), ties broken by the smaller bit pattern.
        if ( (bits64) ( a<<1 ) < (bits64) ( b<<1 ) ) return b;
        if ( (bits64) ( b<<1 ) < (bits64) ( a<<1 ) ) return a;
        return ( a < b ) ? a : b;
    }
    else {
        return b;
    }
}
#ifdef FLOATX80
/*----------------------------------------------------------------------------
| The pattern for a default generated extended double-precision NaN. The
| `high' and `low' values hold the most- and least-significant bits,
| respectively.
*----------------------------------------------------------------------------*/
#define floatx80_default_nan_high 0xFFFF
#define floatx80_default_nan_low LIT64( 0xC000000000000000 )
/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is a
| NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
flag floatx80_is_nan( floatx80 a )
{
    // NaN: exponent field all ones and a nonzero significand below the
    // explicit integer bit (bit 63 is discarded by the shift).
    if ( ( a.high & 0x7FFF ) != 0x7FFF ) return 0;
    return (bits64) ( a.low<<1 ) != 0;
}
/*----------------------------------------------------------------------------
| Returns 1 if the extended double-precision floating-point value `a' is a
| signaling NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
flag floatx80_is_signaling_nan( floatx80 a )
{
    // Signaling NaN: exponent all ones, quiet bit (bit 62) clear, and a
    // nonzero significand apart from the explicit integer bit.
    bits64 fracNoQuiet = a.low & ~ LIT64( 0x4000000000000000 );
    if ( ( a.high & 0x7FFF ) != 0x7FFF ) return 0;
    if ( a.low != fracNoQuiet ) return 0;  // quiet bit was set
    return (bits64) ( fracNoQuiet<<1 ) != 0;
}
/*----------------------------------------------------------------------------
| Returns the result of converting the extended double-precision floating-
| point NaN `a' to the canonical NaN format. If `a' is a signaling NaN, the
| invalid exception is raised.
*----------------------------------------------------------------------------*/
static commonNaNT floatx80ToCommonNaN( floatx80 a )
{
    // A signaling NaN raises the invalid exception on conversion.
    commonNaNT result;
    if ( floatx80_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
    result.low = 0;
    // Drop the explicit integer bit; left-align the fraction.
    result.high = a.low<<1;
    result.sign = a.high>>15;
    return result;
}
/*----------------------------------------------------------------------------
| Returns the result of converting the canonical NaN `a' to the extended
| double-precision floating-point format.
*----------------------------------------------------------------------------*/
static floatx80 commonNaNToFloatx80( commonNaNT a )
{
    // Rebuild: all-ones exponent with the sign, and a significand with
    // the integer and quiet bits set plus the canonical fraction bits.
    floatx80 result;
    result.high = ( ( (bits16) a.sign )<<15 ) | 0x7FFF;
    result.low = LIT64( 0xC000000000000000 ) | ( a.high>>1 );
    return result;
}
/*----------------------------------------------------------------------------
| Takes two extended double-precision floating-point values `a' and `b', one
| of which is a NaN, and returns the appropriate NaN result. If either `a' or
| `b' is a signaling NaN, the invalid exception is raised.
*----------------------------------------------------------------------------*/
static floatx80 propagateFloatx80NaN( floatx80 a, floatx80 b )
{
    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
    aIsNaN = floatx80_is_nan( a );
    aIsSignalingNaN = floatx80_is_signaling_nan( a );
    bIsNaN = floatx80_is_nan( b );
    bIsSignalingNaN = floatx80_is_signaling_nan( b );
    // Quiet both inputs: set the explicit integer bit and the quiet bit.
    a.low |= LIT64( 0xC000000000000000 );
    b.low |= LIT64( 0xC000000000000000 );
    if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
    if ( aIsSignalingNaN ) {
        if ( bIsSignalingNaN ) goto returnLargerSignificand;
        // Prefer a quiet NaN over a (now-quieted) signaling one.
        return bIsNaN ? b : a;
    }
    else if ( aIsNaN ) {
        if ( bIsSignalingNaN | ! bIsNaN ) return a;
 returnLargerSignificand:
        // Deterministic choice: larger significand wins, ties broken by
        // the smaller sign/exponent word.
        if ( a.low < b.low ) return b;
        if ( b.low < a.low ) return a;
        return ( a.high < b.high ) ? a : b;
    }
    else {
        return b;
    }
}
#endif
#ifdef FLOAT128
/*----------------------------------------------------------------------------
| The pattern for a default generated quadruple-precision NaN. The `high' and
| `low' values hold the most- and least-significant bits, respectively.
*----------------------------------------------------------------------------*/
#define float128_default_nan_high LIT64( 0xFFFF800000000000 )
#define float128_default_nan_low LIT64( 0x0000000000000000 )
/*----------------------------------------------------------------------------
| Returns 1 if the quadruple-precision floating-point value `a' is a NaN;
| otherwise returns 0.
*----------------------------------------------------------------------------*/
flag float128_is_nan( float128 a )
{
    // NaN: exponent all ones (checked with the sign shifted out of the
    // high word) plus a nonzero 112-bit fraction.
    flag expAllOnes = ( LIT64( 0xFFFE000000000000 ) <= (bits64) ( a.high<<1 ) );
    flag fracNonzero = ( a.low || ( a.high & LIT64( 0x0000FFFFFFFFFFFF ) ) );
    return expAllOnes && fracNonzero;
}
/*----------------------------------------------------------------------------
| Returns 1 if the quadruple-precision floating-point value `a' is a
| signaling NaN; otherwise returns 0.
*----------------------------------------------------------------------------*/
flag float128_is_signaling_nan( float128 a )
{
    // Signaling NaN: exponent all ones, quiet bit (bit 111) clear, and a
    // nonzero fraction in the remaining bits.
    flag expAndQuiet = ( ( ( a.high>>47 ) & 0xFFFF ) == 0xFFFE );
    flag fracNonzero = ( a.low || ( a.high & LIT64( 0x00007FFFFFFFFFFF ) ) );
    return expAndQuiet && fracNonzero;
}
/*----------------------------------------------------------------------------
| Returns the result of converting the quadruple-precision floating-point NaN
| `a' to the canonical NaN format. If `a' is a signaling NaN, the invalid
| exception is raised.
*----------------------------------------------------------------------------*/
static commonNaNT float128ToCommonNaN( float128 a )
{
    // A signaling NaN raises the invalid exception on conversion.
    commonNaNT result;
    if ( float128_is_signaling_nan( a ) ) float_raise( float_flag_invalid );
    result.sign = a.high>>63;
    // Left-align the 112 fraction bits into the canonical 128-bit field.
    shortShift128Left( a.high, a.low, 16, &result.high, &result.low );
    return result;
}
/*----------------------------------------------------------------------------
| Returns the result of converting the canonical NaN `a' to the quadruple-
| precision floating-point format.
*----------------------------------------------------------------------------*/
static float128 commonNaNToFloat128( commonNaNT a )
{
    // Shift the canonical fraction down into position, then overlay the
    // sign bit and the quiet-NaN exponent/quiet-bit pattern.
    float128 result;
    shift128Right( a.high, a.low, 16, &result.high, &result.low );
    result.high |= ( ( (bits64) a.sign )<<63 ) | LIT64( 0x7FFF800000000000 );
    return result;
}
/*----------------------------------------------------------------------------
| Takes two quadruple-precision floating-point values `a' and `b', one of
| which is a NaN, and returns the appropriate NaN result. If either `a' or
| `b' is a signaling NaN, the invalid exception is raised.
*----------------------------------------------------------------------------*/
static float128 propagateFloat128NaN( float128 a, float128 b )
{
    flag aIsNaN, aIsSignalingNaN, bIsNaN, bIsSignalingNaN;
    aIsNaN = float128_is_nan( a );
    aIsSignalingNaN = float128_is_signaling_nan( a );
    bIsNaN = float128_is_nan( b );
    bIsSignalingNaN = float128_is_signaling_nan( b );
    // Quiet both inputs by setting the quiet bit (bit 111).
    a.high |= LIT64( 0x0000800000000000 );
    b.high |= LIT64( 0x0000800000000000 );
    if ( aIsSignalingNaN | bIsSignalingNaN ) float_raise( float_flag_invalid );
    if ( aIsSignalingNaN ) {
        if ( bIsSignalingNaN ) goto returnLargerSignificand;
        // Prefer a quiet NaN over a (now-quieted) signaling one.
        return bIsNaN ? b : a;
    }
    else if ( aIsNaN ) {
        if ( bIsSignalingNaN | ! bIsNaN ) return a;
 returnLargerSignificand:
        // Deterministic choice: compare significands with the sign bit
        // shifted out of the high word; ties by the smaller high word.
        if ( lt128( a.high<<1, a.low, b.high<<1, b.low ) ) return b;
        if ( lt128( b.high<<1, b.low, a.high<<1, a.low ) ) return a;
        return ( a.high < b.high ) ? a : b;
    }
    else {
        return b;
    }
}
#endif
// NB060506: close namespaces
}
}

View File

@ -0,0 +1,336 @@
/*============================================================================
PROMINENT NOTICE: THIS IS A DERIVATIVE WORK OF THE ORIGINAL SOFTFLOAT CODE
CHANGES:
Comment out FLOAT128
Removed all signed char => char
Inserted this file is a namespace
Added variable to control the sending of real system traps
Protect this header by a #define
pack the fields of floatx80, just in case (should be useless)
Nicolas Brodu, 2006
=============================================================================*/
#ifndef SOFTFLOAT_H
#define SOFTFLOAT_H
#ifdef __cplusplus
extern "C" {
#endif
namespace streflop {
namespace SoftFloat {
// Control which of the softfloat exceptions will send real system traps
// Uses streflop FE_XXX flags, see the softfloat-specialize file
extern int float_exception_realtraps;
/*============================================================================
This C header file is part of the SoftFloat IEC/IEEE Floating-point Arithmetic
Package, Release 2b.
Written by John R. Hauser. This work was made possible in part by the
International Computer Science Institute, located at Suite 600, 1947 Center
Street, Berkeley, California 94704. Funding was partially provided by the
National Science Foundation under grant MIP-9311980. The original version
of this code was written as part of a project to build a fixed-point vector
processor in collaboration with the University of California at Berkeley,
overseen by Profs. Nelson Morgan and John Wawrzynek. More information
is available through the Web page `http://www.cs.berkeley.edu/~jhauser/
arithmetic/SoftFloat.html'.
THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort has
been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT TIMES
RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO PERSONS
AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ALL LOSSES,
COSTS, OR OTHER PROBLEMS THEY INCUR DUE TO THE SOFTWARE, AND WHO FURTHERMORE
EFFECTIVELY INDEMNIFY JOHN HAUSER AND THE INTERNATIONAL COMPUTER SCIENCE
INSTITUTE (possibly via similar legal warning) AGAINST ALL LOSSES, COSTS, OR
OTHER PROBLEMS INCURRED BY THEIR CUSTOMERS AND CLIENTS DUE TO THE SOFTWARE.
Derivative works are acceptable, even for commercial purposes, so long as
(1) the source code for the derivative work includes prominent notice that
the work is derivative, and (2) the source code includes prominent notice with
these four paragraphs for those parts of this code that are retained.
=============================================================================*/
/*----------------------------------------------------------------------------
| The macro `FLOATX80' must be defined to enable the extended double-precision
| floating-point format `floatx80'. If this macro is not defined, the
| `floatx80' type will not be defined, and none of the functions that either
| input or output the `floatx80' type will be defined. The same applies to
| the `FLOAT128' macro and the quadruple-precision format `float128'.
*----------------------------------------------------------------------------*/
// floatx80 is enabled here: the N_SPECIALIZED == 96 wrapper build uses it.
#define FLOATX80
//#define FLOAT128
//typedef SizedInteger<32>::Type int32_t;
//typedef int32_t int32;
/*----------------------------------------------------------------------------
| Software IEC/IEEE floating-point types.
*----------------------------------------------------------------------------*/
// Plain integer containers holding the raw IEEE bit patterns; no native
// floating-point hardware is involved anywhere in these types.
typedef unsigned int float32;
typedef unsigned long long float64;
#ifdef FLOATX80
// 80-bit x87 extended format: 64-bit significand in `low', the sign and
// 15-bit exponent in `high'.
typedef struct {
unsigned long long low;
unsigned short high
#ifdef __GNUC__
// Should be useless, since it's aligned at 64 already
__attribute__ ((__packed__));
#endif
;
// NOTE(review): with __GNUC__ the attribute line above already terminates the
// `high' declarator, so the lone ';' becomes a harmless empty declaration;
// without __GNUC__ that ';' is what terminates `high'.
} floatx80;
#endif
#ifdef FLOAT128
typedef struct {
unsigned long long low, high;
} float128;
#endif
/*----------------------------------------------------------------------------
| Software IEC/IEEE floating-point underflow tininess-detection mode.
*----------------------------------------------------------------------------*/
// Global mode variable; holds one of the float_tininess_* values below.
extern char float_detect_tininess;
enum {
float_tininess_after_rounding = 0,
float_tininess_before_rounding = 1
};
/*----------------------------------------------------------------------------
| Software IEC/IEEE floating-point rounding mode.
*----------------------------------------------------------------------------*/
// Global rounding mode; holds one of the float_round_* values below.
extern char float_rounding_mode;
enum {
float_round_nearest_even = 0,
float_round_down = 1,
float_round_up = 2,
float_round_to_zero = 3
};
/*----------------------------------------------------------------------------
| Software IEC/IEEE floating-point exception flags.
*----------------------------------------------------------------------------*/
// Sticky exception flags: an OR-combination of the float_flag_* bits below.
extern char float_exception_flags;
enum {
float_flag_invalid = 1,
float_flag_divbyzero = 4,
float_flag_overflow = 8,
float_flag_underflow = 16,
float_flag_inexact = 32
};
/*----------------------------------------------------------------------------
| Routine to raise any or all of the software IEC/IEEE floating-point
| exception flags.
*----------------------------------------------------------------------------*/
void float_raise( char );
/*----------------------------------------------------------------------------
| Software IEC/IEEE integer-to-floating-point conversion routines.
*----------------------------------------------------------------------------*/
float32 int32_to_float32( int );
float64 int32_to_float64( int );
#ifdef FLOATX80
floatx80 int32_to_floatx80( int );
#endif
#ifdef FLOAT128
float128 int32_to_float128( int );
#endif
float32 int64_to_float32( long long );
float64 int64_to_float64( long long );
#ifdef FLOATX80
floatx80 int64_to_floatx80( long long );
#endif
#ifdef FLOAT128
float128 int64_to_float128( long long );
#endif
/*----------------------------------------------------------------------------
| Software IEC/IEEE single-precision conversion routines.
*----------------------------------------------------------------------------*/
// NOTE(review): by naming convention the plain conversions presumably honor
// the global float_rounding_mode, while the _round_to_zero variants always
// truncate -- confirm against the SoftFloat implementation.
int float32_to_int32( float32 );
//inline int float32_to_int32( unsigned int value) { return float32_to_int32( static_cast<float32>(value) ); }
int float32_to_int32_round_to_zero( float32 );
//inline int float32_to_int32_round_to_zero( unsigned int value) { return float32_to_int32_round_to_zero( static_cast<float32>(value) ); }
long long float32_to_int64( float32 );
//inline long long float32_to_int64( unsigned int value) { return float32_to_int64( static_cast<float32>(value) ); }
long long float32_to_int64_round_to_zero( float32 );
//inline long long float32_to_int64_round_to_zero( unsigned int value) { return float32_to_int64_round_to_zero(static_cast<float32>(value)); }
float64 float32_to_float64( float32 );
//inline float64 float32_to_float64( unsigned int value) { return float32_to_float64( static_cast<float32>(value) ); }
#ifdef FLOATX80
floatx80 float32_to_floatx80( float32 );
#endif
#ifdef FLOAT128
float128 float32_to_float128( float32 );
#endif
/*----------------------------------------------------------------------------
| Software IEC/IEEE single-precision operations.
*----------------------------------------------------------------------------*/
// Arithmetic, remainder, square root and comparisons on raw float32 bit
// patterns. The comparison routines return a char used as a boolean.
float32 float32_round_to_int( float32 );
//inline float32 float32_round_to_int( unsigned int value) { return float32_round_to_int( static_cast<float32>(value) ); }
float32 float32_add( float32, float32 );
//inline float32 float32_add( unsigned int value1, unsigned int value2) { return float32_add( static_cast<float32>(value1), static_cast<float32>(value2) ); }
float32 float32_sub( float32, float32 );
//inline float32 float32_sub( unsigned int value1, unsigned int value2) { return float32_sub( static_cast<float32>(value1), static_cast<float32>(value2) ); }
float32 float32_mul( float32, float32 );
//inline float32 float32_mul( unsigned int value1, unsigned int value2) { return float32_mul( static_cast<float32>(value1), static_cast<float32>(value2) ); }
float32 float32_div( float32, float32 );
//inline float32 float32_div( unsigned int value1, unsigned int value2) { return float32_div( static_cast<float32>(value1), static_cast<float32>(value2) ); }
float32 float32_rem( float32, float32 );
//inline float32 float32_rem( unsigned int value1, unsigned int value2) { return float32_rem( static_cast<float32>(value1), static_cast<float32>(value2) ); }
float32 float32_sqrt( float32 );
//inline float32 float32_sqrt( unsigned int value1) { return float32_sqrt( static_cast<float32>(value1) ); }
char float32_eq( float32, float32 );
//inline char float32_eq( unsigned int value1, unsigned int value2) { return float32_eq( static_cast<float32>(value1), static_cast<float32>(value2) ); }
char float32_le( float32, float32 );
//inline char float32_le( unsigned int value1, unsigned int value2) { return float32_le( static_cast<float32>(value1), static_cast<float32>(value2) ); }
char float32_lt( float32, float32 );
//inline char float32_lt( unsigned int value1, unsigned int value2) { return float32_lt( static_cast<float32>(value1), static_cast<float32>(value2) ); }
char float32_eq_signaling( float32, float32 );
//inline char float32_eq_signaling( unsigned int value1, unsigned int value2) { return float32_eq_signaling( static_cast<float32>(value1), static_cast<float32>(value2) ); }
char float32_le_quiet( float32, float32 );
//inline char float32_le_quiet( unsigned int value1, unsigned int value2) { return float32_le_quiet( static_cast<float32>(value1), static_cast<float32>(value2) ); }
char float32_lt_quiet( float32, float32 );
//inline char float32_lt_quiet( unsigned int value1, unsigned int value2) { return float32_lt_quiet( static_cast<float32>(value1), static_cast<float32>(value2) ); }
char float32_is_signaling_nan( float32 );
//inline char float32_is_signaling_nan( unsigned int value1) { return float32_is_signaling_nan( static_cast<float32>(value1) ); }
/*----------------------------------------------------------------------------
| Software IEC/IEEE double-precision conversion routines.
*----------------------------------------------------------------------------*/
int float64_to_int32( float64 );
int float64_to_int32_round_to_zero( float64 );
long long float64_to_int64( float64 );
long long float64_to_int64_round_to_zero( float64 );
float32 float64_to_float32( float64 );
#ifdef FLOATX80
floatx80 float64_to_floatx80( float64 );
#endif
#ifdef FLOAT128
float128 float64_to_float128( float64 );
#endif
/*----------------------------------------------------------------------------
| Software IEC/IEEE double-precision operations.
*----------------------------------------------------------------------------*/
// Same interface as the float32 routines above, for raw float64 bit patterns.
float64 float64_round_to_int( float64 );
float64 float64_add( float64, float64 );
float64 float64_sub( float64, float64 );
float64 float64_mul( float64, float64 );
float64 float64_div( float64, float64 );
float64 float64_rem( float64, float64 );
float64 float64_sqrt( float64 );
char float64_eq( float64, float64 );
char float64_le( float64, float64 );
char float64_lt( float64, float64 );
char float64_eq_signaling( float64, float64 );
char float64_le_quiet( float64, float64 );
char float64_lt_quiet( float64, float64 );
char float64_is_signaling_nan( float64 );
#ifdef FLOATX80
/*----------------------------------------------------------------------------
| Software IEC/IEEE extended double-precision conversion routines.
*----------------------------------------------------------------------------*/
int floatx80_to_int32( floatx80 );
int floatx80_to_int32_round_to_zero( floatx80 );
long long floatx80_to_int64( floatx80 );
long long floatx80_to_int64_round_to_zero( floatx80 );
float32 floatx80_to_float32( floatx80 );
float64 floatx80_to_float64( floatx80 );
#ifdef FLOAT128
float128 floatx80_to_float128( floatx80 );
#endif
/*----------------------------------------------------------------------------
| Software IEC/IEEE extended double-precision rounding precision. Valid
| values are 32, 64, and 80.
*----------------------------------------------------------------------------*/
extern char floatx80_rounding_precision;
/*----------------------------------------------------------------------------
| Software IEC/IEEE extended double-precision operations.
*----------------------------------------------------------------------------*/
// Same interface again, for the 80-bit extended format.
floatx80 floatx80_round_to_int( floatx80 );
floatx80 floatx80_add( floatx80, floatx80 );
floatx80 floatx80_sub( floatx80, floatx80 );
floatx80 floatx80_mul( floatx80, floatx80 );
floatx80 floatx80_div( floatx80, floatx80 );
floatx80 floatx80_rem( floatx80, floatx80 );
floatx80 floatx80_sqrt( floatx80 );
char floatx80_eq( floatx80, floatx80 );
char floatx80_le( floatx80, floatx80 );
char floatx80_lt( floatx80, floatx80 );
char floatx80_eq_signaling( floatx80, floatx80 );
char floatx80_le_quiet( floatx80, floatx80 );
char floatx80_lt_quiet( floatx80, floatx80 );
char floatx80_is_signaling_nan( floatx80 );
#endif
#ifdef FLOAT128
/*----------------------------------------------------------------------------
| Software IEC/IEEE quadruple-precision conversion routines.
*----------------------------------------------------------------------------*/
int float128_to_int32( float128 );
int float128_to_int32_round_to_zero( float128 );
long long float128_to_int64( float128 );
long long float128_to_int64_round_to_zero( float128 );
float32 float128_to_float32( float128 );
float64 float128_to_float64( float128 );
#ifdef FLOATX80
floatx80 float128_to_floatx80( float128 );
#endif
/*----------------------------------------------------------------------------
| Software IEC/IEEE quadruple-precision operations.
*----------------------------------------------------------------------------*/
// Same interface again, for the 128-bit format (compiled out by default,
// since FLOAT128 is not defined above).
float128 float128_round_to_int( float128 );
float128 float128_add( float128, float128 );
float128 float128_sub( float128, float128 );
float128 float128_mul( float128, float128 );
float128 float128_div( float128, float128 );
float128 float128_rem( float128, float128 );
float128 float128_sqrt( float128 );
char float128_eq( float128, float128 );
char float128_le( float128, float128 );
char float128_lt( float128, float128 );
char float128_eq_signaling( float128, float128 );
char float128_le_quiet( float128, float128 );
char float128_lt_quiet( float128, float128 );
char float128_is_signaling_nan( float128 );
#endif
// Close namespaces
}
}
#ifdef __cplusplus
} // extern "C"
#endif
#endif

View File

@ -0,0 +1,500 @@
/*
streflop: STandalone REproducible FLOating-Point
Nicolas Brodu, 2006
Code released according to the GNU Lesser General Public License
Heavily relies on GNU Libm, itself depending on netlib fplibm, GNU MP, and IBM MP lib.
Uses SoftFloat too.
Please read the history and copyright information in the documentation provided with the source code
*/
#ifdef STREFLOP_SOFT
// Include generic version
#include "streflop.h"
// Macro to select the correct version of a softfloat function according to user flags
// N_SPECIALIZED is presumably defined per-size by the build system (the #else
// branch errors out otherwise): 96 -> floatx80, 64 -> float64, 32 -> float32.
// SF_PREPEND pastes names like float32_add; SF_APPEND pastes names like
// int32_to_float32; SF_TYPE is the matching raw softfloat type.
#if N_SPECIALIZED == 96
#define SF_PREPEND(func) floatx80 ## func
#define SF_APPEND(func) func ## floatx80
#define SF_TYPE floatx80
#elif N_SPECIALIZED == 64
#define SF_PREPEND(func) float64 ## func
#define SF_APPEND(func) func ## float64
#define SF_TYPE float64
#elif N_SPECIALIZED == 32
#define SF_PREPEND(func) float32 ## func
#define SF_APPEND(func) func ## float32
#define SF_TYPE float32
#else
#error Unknown specialization size (N_SPECIALIZED)
#endif
// This file may include System.h and SoftFloat
#include "System.h"
#include "softfloat/softfloat.h"
namespace streflop {
using namespace streflop::SoftFloat;
// Template instantiations for the size selected by N_SPECIALIZED are done here.
// In-place arithmetic: each operator maps straight onto the softfloat
// primitive chosen by SF_PREPEND, storing the result back into this wrapper.
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator+=(const SoftFloatWrapper<N_SPECIALIZED>& f) {
    SF_TYPE& self = value<SF_TYPE>();
    self = SF_PREPEND(_add)(self, f.value<SF_TYPE>());
    return *this;
}
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator-=(const SoftFloatWrapper<N_SPECIALIZED>& f) {
    SF_TYPE& self = value<SF_TYPE>();
    self = SF_PREPEND(_sub)(self, f.value<SF_TYPE>());
    return *this;
}
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator*=(const SoftFloatWrapper<N_SPECIALIZED>& f) {
    SF_TYPE& self = value<SF_TYPE>();
    self = SF_PREPEND(_mul)(self, f.value<SF_TYPE>());
    return *this;
}
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator/=(const SoftFloatWrapper<N_SPECIALIZED>& f) {
    SF_TYPE& self = value<SF_TYPE>();
    self = SF_PREPEND(_div)(self, f.value<SF_TYPE>());
    return *this;
}
// Comparisons: the softfloat predicates return a char acting as a boolean.
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator==(const SoftFloatWrapper<N_SPECIALIZED>& f) const {
    return SF_PREPEND(_eq)(value<SF_TYPE>(), f.value<SF_TYPE>()) != 0;
}
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator!=(const SoftFloatWrapper<N_SPECIALIZED>& f) const {
    // Plain boolean negation is valid for an equality test
    return SF_PREPEND(_eq)(value<SF_TYPE>(), f.value<SF_TYPE>()) == 0;
}
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator<(const SoftFloatWrapper<N_SPECIALIZED>& f) const {
    return SF_PREPEND(_lt)(value<SF_TYPE>(), f.value<SF_TYPE>()) != 0;
}
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator<=(const SoftFloatWrapper<N_SPECIALIZED>& f) const {
    return SF_PREPEND(_le)(value<SF_TYPE>(), f.value<SF_TYPE>()) != 0;
}
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator>(const SoftFloatWrapper<N_SPECIALIZED>& f) const {
    // NaN handling: swap the arguments of _lt; do NOT negate <=, which would
    // wrongly return true when either operand is NaN
    return SF_PREPEND(_lt)(f.value<SF_TYPE>(), value<SF_TYPE>()) != 0;
}
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator>=(const SoftFloatWrapper<N_SPECIALIZED>& f) const {
    // NaN handling: swap the arguments of _le; do NOT negate <
    return SF_PREPEND(_le)(f.value<SF_TYPE>(), value<SF_TYPE>()) != 0;
}
// Compile-time selection (template specialization instead of macros) of the
// right softfloat integer conversion, so that sizeof is usable in the choice.
// Note: a first template argument equal to N_SPECIALIZED keeps the symbols of
// each per-size compilation of this file distinct, avoiding duplicates --
// consistent with the SF_XXPEND macro scheme.
template<int N, typename T, bool is_large> struct IntConverter {
};
// Integers wider than 32 bits go through the 64-bit softfloat conversions
template<typename T> struct IntConverter<N_SPECIALIZED, T, true> {
    static inline SF_TYPE convert_from_int(T an_int) {
        return SF_APPEND(int64_to_)(static_cast<int64_t>(an_int));
    }
    static inline T convert_to_int(SF_TYPE value) {
        return static_cast<T>(SF_PREPEND(_to_int64_round_to_zero)(value));
    }
};
// Integers of 32 bits or fewer go through the 32-bit softfloat conversions
template<typename T> struct IntConverter<N_SPECIALIZED, T, false> {
    static inline SF_TYPE convert_from_int(T an_int) {
        return SF_APPEND(int32_to_)(static_cast<int32_t>(an_int));
    }
    static inline T convert_to_int(SF_TYPE value) {
        return static_cast<T>(SF_PREPEND(_to_int32_round_to_zero)(value));
    }
};
// Generates the full SoftFloatWrapper<N_SPECIALIZED> interface against one
// native integer type: converting constructor, assignment, cast-to-int, the
// compound-assignment operators and all six comparisons. IntConverter picks
// the 32- or 64-bit softfloat conversion from sizeof(native_type) at compile
// time; comparisons against NaN follow the same argument-swap rule as the
// wrapper-vs-wrapper operators above.
#define STREFLOP_X87DENORMAL_NATIVE_OPS_INT(native_type) \
template<> SoftFloatWrapper<N_SPECIALIZED>::SoftFloatWrapper(const native_type f) { \
value<SF_TYPE>() = IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator=(const native_type f) { \
value<SF_TYPE>() = IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f); \
return *this; \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>::operator native_type() const { \
return IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_to_int(value<SF_TYPE>()); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator+=(const native_type f) { \
value<SF_TYPE>() = SF_PREPEND(_add)(value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f)); \
return *this; \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator-=(const native_type f) { \
value<SF_TYPE>() = SF_PREPEND(_sub)(value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f)); \
return *this; \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator*=(const native_type f) { \
value<SF_TYPE>() = SF_PREPEND(_mul)(value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f)); \
return *this; \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator/=(const native_type f) { \
value<SF_TYPE>() = SF_PREPEND(_div)(value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f)); \
return *this; \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator==(const native_type f) const { \
return SF_PREPEND(_eq)(value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f)); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator!=(const native_type f) const { \
return !SF_PREPEND(_eq)(value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f)); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator<(const native_type f) const { \
return SF_PREPEND(_lt)(value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f)); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator<=(const native_type f) const { \
return SF_PREPEND(_le)(value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f)); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator>(const native_type f) const { \
return SF_PREPEND(_lt)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f), value<SF_TYPE>()); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator>=(const native_type f) const { \
return SF_PREPEND(_le)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f), value<SF_TYPE>()); \
}
// Now handle the same operations with native float types
// Use the softfloat property of memory pattern equivalence.
// => consider the float as a memory zone, then pass that to the softfloat conversion routines
// => this way, conversion is done by softfloat, not by the FPU
// Use a sizeof trick:
// - Specialize for BOTH C type and the expected type size of C type for correct memory pattern
// - Call the template with sizeof(C type) to rule out mismatching combinations
// - this way, it would be possible to extend the scheme to other architectures
// Ex: could specialize for <long double, 10>, <long double, 12> and <long double, 16>
// Note: read above note for specialization on N_SPECIALIZED
template<int N, typename ctype, int ctype_size> struct FloatConverter {
};
// dummy wrappers to cover all cases: identity conversions let the
// SF_APPEND/SF_PREPEND name-pasting resolve when source and target coincide
inline float32 float32_to_float32(float32 a_float) {return a_float;}
inline float64 float64_to_float64(float64 a_float) {return a_float;}
inline floatx80 floatx80_to_floatx80(floatx80 a_float) {return a_float;}
// Specialization for float32 when C float type size is 4
// NOTE(review): conversion reads/writes the native float's raw bit pattern
// via reinterpret_cast -- formally undefined under strict aliasing, but this
// memory-pattern equivalence is the deliberate streflop design (see above).
template<> struct FloatConverter<N_SPECIALIZED, float, 4> {
static inline SF_TYPE
convert_from_float(const float a_float) {
return SF_APPEND(float32_to_)(*reinterpret_cast<const float32*>(&a_float));
}
static inline float convert_to_float(SF_TYPE value) {
float32 res = SF_PREPEND(_to_float32)(value);
return *reinterpret_cast<float*>(&res);
}
};
// Specialization for float64 when C double type size is 8
// NOTE(review): same reinterpret_cast bit-pattern punning as the float/4
// specialization above -- deliberate, see the memory-equivalence comments.
template<> struct FloatConverter<N_SPECIALIZED, double, 8> {
static inline SF_TYPE
convert_from_float(const double a_float) {
return SF_APPEND(float64_to_)(*reinterpret_cast<const float64*>(&a_float));
}
static inline double convert_to_float(SF_TYPE value) {
float64 res = SF_PREPEND(_to_float64)(value);
return *reinterpret_cast<double*>(&res);
}
};
// Specialization for floatx80 when the C long double type size is 12 bytes:
// 10 significant bytes plus 16 bits of padding, whose position is endian
// dependent. Conversion uses the softfloat memory bit-pattern equivalence
// property, reading/writing the significant bytes in place.
template<> struct FloatConverter<N_SPECIALIZED, long double, 12> {
// Little endian OK: both addresses are the same
#if __FLOAT_WORD_ORDER == 1234
    static inline SF_TYPE
    convert_from_float(const long double a_float) {
        return SF_APPEND(floatx80_to_)(*reinterpret_cast<const floatx80*>(&a_float));
    }
    static inline long double convert_to_float(SF_TYPE value) {
        // avoid invalid memory access: must return a 12-byte value from a 10-byte type
        // do it this way, by declaring the 12-byte on the stack
        long double holder;
        // And use that space for the result using the softfloat memory bit pattern equivalence property
        *reinterpret_cast<floatx80*>(&holder) = SF_PREPEND(_to_floatx80)(value);
        return holder;
    }
// big endian needs address modification, but for what architecture?
#elif __FLOAT_WORD_ORDER == 4321
#warning You are using a completely UNTESTED new architecture. Please check that the 12-byte long double containing a 10-byte float is properly aligned in memory so that softfloat may correctly read the bit pattern. If this works for you, remove this warning and please consider sending a patch!
    static inline SF_TYPE
    convert_from_float(const long double a_float) {
        // significant bytes start 2 bytes into the object on big-endian
        return SF_APPEND(floatx80_to_)(*reinterpret_cast<const floatx80*>(reinterpret_cast<const char*>(&a_float)+2));
    }
    static inline long double convert_to_float(SF_TYPE value) {
        // avoid invalid memory access: must return a 12-byte value from a 10-byte type
        long double holder;
        // BUGFIX: go through char* (not const char*) -- reinterpret_cast may not
        // cast away constness, so the original const char* form failed to compile.
        *reinterpret_cast<floatx80*>(reinterpret_cast<char*>(&holder)+2) = SF_PREPEND(_to_floatx80)(value);
        return holder;
    }
#else
#error Unknown byte order
#endif
};
// Specialization for floatx80 when the C long double type size is 16 bytes.
// This is the case for g++ with -m128bit-long-double, which is itself the
// default on x86_64: 10 significant bytes plus 48 bits of padding.
template<> struct FloatConverter<N_SPECIALIZED, long double, 16> {
// Little endian OK: both addresses are the same
#if __FLOAT_WORD_ORDER == 1234
    static inline SF_TYPE
    convert_from_float(const long double a_float) {
        return SF_APPEND(floatx80_to_)(*reinterpret_cast<const floatx80*>(&a_float));
    }
    static inline long double convert_to_float(SF_TYPE value) {
        // avoid invalid memory access: must return a 16-byte value from a 10-byte type
        // do it this way, by declaring the 16-byte on the stack
        long double holder;
        // And use that space for the result using the softfloat memory bit pattern equivalence property
        *reinterpret_cast<floatx80*>(&holder) = SF_PREPEND(_to_floatx80)(value);
        return holder;
    }
// big endian needs address modification, but for what architecture?
#elif __FLOAT_WORD_ORDER == 4321
#warning You are using a completely UNTESTED new architecture. Please check that the 16-byte long double containing a 10-byte float is properly aligned in memory so that softfloat may correctly read the bit pattern. If this works for you, remove this warning and please consider sending a patch!
    static inline SF_TYPE
    convert_from_float(const long double a_float) {
        // significant bytes start 6 bytes into the object on big-endian
        return SF_APPEND(floatx80_to_)(*reinterpret_cast<const floatx80*>(reinterpret_cast<const char*>(&a_float)+6));
    }
    static inline long double convert_to_float(SF_TYPE value) {
        // avoid invalid memory access: must return a 16-byte value from a 10-byte type
        long double holder;
        // BUGFIX: go through char* (not const char*) -- reinterpret_cast may not
        // cast away constness, so the original const char* form failed to compile.
        *reinterpret_cast<floatx80*>(reinterpret_cast<char*>(&holder)+6) = SF_PREPEND(_to_floatx80)(value);
        return holder;
    }
#else
#error Unknown byte order
#endif
};
// Same generator as STREFLOP_X87DENORMAL_NATIVE_OPS_INT, but for one native
// floating-point type: FloatConverter (selected by sizeof(native_type))
// reinterprets the native value's bit pattern so every conversion is done by
// softfloat rather than the FPU.
#define STREFLOP_X87DENORMAL_NATIVE_OPS_FLOAT(native_type) \
template<> SoftFloatWrapper<N_SPECIALIZED>::SoftFloatWrapper(const native_type f) { \
value<SF_TYPE>() = FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator=(const native_type f) { \
value<SF_TYPE>() = FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f); \
return *this; \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>::operator native_type() const { \
return FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_to_float(value<SF_TYPE>()); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator+=(const native_type f) { \
value<SF_TYPE>() = SF_PREPEND(_add)(value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f)); \
return *this; \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator-=(const native_type f) { \
value<SF_TYPE>() = SF_PREPEND(_sub)(value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f)); \
return *this; \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator*=(const native_type f) { \
value<SF_TYPE>() = SF_PREPEND(_mul)(value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f)); \
return *this; \
} \
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator/=(const native_type f) { \
value<SF_TYPE>() = SF_PREPEND(_div)(value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f)); \
return *this; \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator==(const native_type f) const { \
return SF_PREPEND(_eq)(value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f)); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator!=(const native_type f) const { \
return !SF_PREPEND(_eq)(value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f)); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator<(const native_type f) const { \
return SF_PREPEND(_lt)(value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f)); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator<=(const native_type f) const { \
return SF_PREPEND(_le)(value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f)); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator>(const native_type f) const { \
return SF_PREPEND(_lt)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f), value<SF_TYPE>()); \
} \
template<> bool SoftFloatWrapper<N_SPECIALIZED>::operator>=(const native_type f) const { \
return SF_PREPEND(_le)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f), value<SF_TYPE>()); \
}
// Instantiate the wrapper <-> native conversions and mixed operators for
// every native integral and floating-point type.
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(char)
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(unsigned char)
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(short)
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(unsigned short)
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(int)
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(unsigned int)
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(long)
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(unsigned long)
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(long long)
STREFLOP_X87DENORMAL_NATIVE_OPS_INT(unsigned long long)
STREFLOP_X87DENORMAL_NATIVE_OPS_FLOAT(float)
STREFLOP_X87DENORMAL_NATIVE_OPS_FLOAT(double)
STREFLOP_X87DENORMAL_NATIVE_OPS_FLOAT(long double)
/// Binary arithmetic between two wrappers.
/// The boolean dummy argument selects the raw-value constructor, which
/// distinguishes it from integer conversion and avoids a temporary object.
template<> SoftFloatWrapper<N_SPECIALIZED> operator+(const SoftFloatWrapper<N_SPECIALIZED>& f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) {
    const SF_TYPE raw = SF_PREPEND(_add)(f1.value<SF_TYPE>(), f2.value<SF_TYPE>());
    return SoftFloatWrapper<N_SPECIALIZED>(raw, true);
}
template<> SoftFloatWrapper<N_SPECIALIZED> operator-(const SoftFloatWrapper<N_SPECIALIZED>& f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) {
    const SF_TYPE raw = SF_PREPEND(_sub)(f1.value<SF_TYPE>(), f2.value<SF_TYPE>());
    return SoftFloatWrapper<N_SPECIALIZED>(raw, true);
}
template<> SoftFloatWrapper<N_SPECIALIZED> operator*(const SoftFloatWrapper<N_SPECIALIZED>& f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) {
    const SF_TYPE raw = SF_PREPEND(_mul)(f1.value<SF_TYPE>(), f2.value<SF_TYPE>());
    return SoftFloatWrapper<N_SPECIALIZED>(raw, true);
}
template<> SoftFloatWrapper<N_SPECIALIZED> operator/(const SoftFloatWrapper<N_SPECIALIZED>& f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) {
    const SF_TYPE raw = SF_PREPEND(_div)(f1.value<SF_TYPE>(), f2.value<SF_TYPE>());
    return SoftFloatWrapper<N_SPECIALIZED>(raw, true);
}
// Generates the free binary operators mixing a wrapper with one native
// integer type, in both argument orders, plus the int-on-the-left
// comparisons. The dummy boolean constructor argument and the NaN
// argument-swap rule are the same as in the wrapper-vs-wrapper operators.
#define STREFLOP_X87DENORMAL_BINARY_OPS_INT(native_type) \
template<> SoftFloatWrapper<N_SPECIALIZED> operator+(const SoftFloatWrapper<N_SPECIALIZED>& f1, const native_type f2) { \
return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_add)(f1.value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f2)), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator-(const SoftFloatWrapper<N_SPECIALIZED>& f1, const native_type f2) { \
return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_sub)(f1.value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f2)), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator*(const SoftFloatWrapper<N_SPECIALIZED>& f1, const native_type f2) { \
return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_mul)(f1.value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f2)), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator/(const SoftFloatWrapper<N_SPECIALIZED>& f1, const native_type f2) { \
return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_div)(f1.value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f2)), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator+(const native_type f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) { \
return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_add)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f1), f2.value<SF_TYPE>()), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator-(const native_type f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) { \
return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_sub)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f1), f2.value<SF_TYPE>()), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator*(const native_type f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) { \
return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_mul)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f1), f2.value<SF_TYPE>()), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator/(const native_type f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) { \
return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_div)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(f1), f2.value<SF_TYPE>()), true); \
} \
template<> bool operator==(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
return SF_PREPEND(_eq)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(value), f.value<SF_TYPE>()); \
} \
template<> bool operator!=(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
return !SF_PREPEND(_eq)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(value), f.value<SF_TYPE>()); \
} \
template<> bool operator<(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
return SF_PREPEND(_lt)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(value), f.value<SF_TYPE>()); \
} \
template<> bool operator<=(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
return SF_PREPEND(_le)(IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(value), f.value<SF_TYPE>()); \
} \
template<> bool operator>(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
return SF_PREPEND(_lt)(f.value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(value)); \
} \
template<> bool operator>=(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
return SF_PREPEND(_le)(f.value<SF_TYPE>(), IntConverter< N_SPECIALIZED, native_type, (sizeof(native_type)>4) >::convert_from_int(value)); \
}
// Generates the full set of mixed-operand binary operators between a
// SoftFloatWrapper<N_SPECIALIZED> and a native floating-point type
// (instantiated below for float, double and long double).
// The native operand is first converted to the SoftFloat representation
// through FloatConverter, then the arithmetic/comparison runs entirely in
// SoftFloat so results stay bit-reproducible across platforms.
// SF_PREPEND/SF_APPEND expand to the SoftFloat routine names for the
// precision selected by N_SPECIALIZED, and SF_TYPE is the matching
// SoftFloat storage type (presumably float32/float64/floatx80 — defined
// outside this view).
// Covered: + - * / in both operand orders, and == != < <= > >= with the
// native value on the left ('>' and '>=' are expressed by swapping the
// operands of the SoftFloat _lt/_le primitives).
// NOTE: no comments inside the macro body — backslash line-splicing
// happens before comment removal, so a '//' there would eat the
// continuation and truncate the macro.
#define STREFLOP_X87DENORMAL_BINARY_OPS_FLOAT(native_type) \
template<> SoftFloatWrapper<N_SPECIALIZED> operator+(const SoftFloatWrapper<N_SPECIALIZED>& f1, const native_type f2) { \
    return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_add)(f1.value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f2)), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator-(const SoftFloatWrapper<N_SPECIALIZED>& f1, const native_type f2) { \
    return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_sub)(f1.value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f2)), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator*(const SoftFloatWrapper<N_SPECIALIZED>& f1, const native_type f2) { \
    return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_mul)(f1.value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f2)), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator/(const SoftFloatWrapper<N_SPECIALIZED>& f1, const native_type f2) { \
    return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_div)(f1.value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f2)), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator+(const native_type f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) { \
    return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_add)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f1), f2.value<SF_TYPE>()), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator-(const native_type f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) { \
    return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_sub)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f1), f2.value<SF_TYPE>()), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator*(const native_type f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) { \
    return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_mul)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f1), f2.value<SF_TYPE>()), true); \
} \
template<> SoftFloatWrapper<N_SPECIALIZED> operator/(const native_type f1, const SoftFloatWrapper<N_SPECIALIZED>& f2) { \
    return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_div)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(f1), f2.value<SF_TYPE>()), true); \
} \
template<> bool operator==(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
    return SF_PREPEND(_eq)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(value), f.value<SF_TYPE>()); \
} \
template<> bool operator!=(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
    return !SF_PREPEND(_eq)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(value), f.value<SF_TYPE>()); \
} \
template<> bool operator<(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
    return SF_PREPEND(_lt)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(value), f.value<SF_TYPE>()); \
} \
template<> bool operator<=(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
    return SF_PREPEND(_le)(FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(value), f.value<SF_TYPE>()); \
} \
template<> bool operator>(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
    return SF_PREPEND(_lt)(f.value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(value)); \
} \
template<> bool operator>=(const native_type value, const SoftFloatWrapper<N_SPECIALIZED>& f) { \
    return SF_PREPEND(_le)(f.value<SF_TYPE>(), FloatConverter< N_SPECIALIZED, native_type, sizeof(native_type)>::convert_from_float(value)); \
}
// Instantiate the mixed-operand binary operators for every native integer
// type (the _INT macro is defined earlier in this file and routes through
// IntConverter instead of FloatConverter).
STREFLOP_X87DENORMAL_BINARY_OPS_INT(char)
STREFLOP_X87DENORMAL_BINARY_OPS_INT(unsigned char)
STREFLOP_X87DENORMAL_BINARY_OPS_INT(short)
STREFLOP_X87DENORMAL_BINARY_OPS_INT(unsigned short)
STREFLOP_X87DENORMAL_BINARY_OPS_INT(int)
STREFLOP_X87DENORMAL_BINARY_OPS_INT(unsigned int)
STREFLOP_X87DENORMAL_BINARY_OPS_INT(long)
STREFLOP_X87DENORMAL_BINARY_OPS_INT(unsigned long)
STREFLOP_X87DENORMAL_BINARY_OPS_INT(long long)
STREFLOP_X87DENORMAL_BINARY_OPS_INT(unsigned long long)
// And for the native floating-point types, which go through FloatConverter.
STREFLOP_X87DENORMAL_BINARY_OPS_FLOAT(float)
STREFLOP_X87DENORMAL_BINARY_OPS_FLOAT(double)
STREFLOP_X87DENORMAL_BINARY_OPS_FLOAT(long double)
/// Unary operators
template<> SoftFloatWrapper<N_SPECIALIZED> operator-(const SoftFloatWrapper<N_SPECIALIZED>& f) {
    // Negation is computed as (0 - f) in SoftFloat rather than by flipping
    // the sign bit directly, so that SoftFloat's exception handling and
    // rounding machinery is applied consistently.
    const SF_TYPE zero = SF_APPEND(int32_to_)(0);
    return SoftFloatWrapper<N_SPECIALIZED>(SF_PREPEND(_sub)(zero, f.value<SF_TYPE>()), true);
}
template<> SoftFloatWrapper<N_SPECIALIZED> operator+(const SoftFloatWrapper<N_SPECIALIZED>& f) {
    // Unary plus is the identity: hand back a copy of the operand.
    SoftFloatWrapper<N_SPECIALIZED> copy(f);
    return copy;
}
template<> SoftFloatWrapper<N_SPECIALIZED>::SoftFloatWrapper(const SoftFloatWrapper<32>& f) {
    // Construct from a 32-bit wrapper: run the SoftFloat conversion routine
    // to bring the value to this wrapper's precision, then store it.
    const SF_TYPE converted = SF_APPEND(float32_to_)(f.value<float32>());
    value<SF_TYPE>() = converted;
}
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator=(const SoftFloatWrapper<32>& f) {
    // Assignment from a 32-bit wrapper: same conversion as the constructor.
    const SF_TYPE converted = SF_APPEND(float32_to_)(f.value<float32>());
    value<SF_TYPE>() = converted;
    return *this;
}
template<> SoftFloatWrapper<N_SPECIALIZED>::SoftFloatWrapper(const SoftFloatWrapper<64>& f) {
    // Construct from a 64-bit wrapper: convert through SoftFloat to this
    // wrapper's precision, then store the result.
    const SF_TYPE converted = SF_APPEND(float64_to_)(f.value<float64>());
    value<SF_TYPE>() = converted;
}
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator=(const SoftFloatWrapper<64>& f) {
    // Assignment from a 64-bit wrapper: same conversion as the constructor.
    const SF_TYPE converted = SF_APPEND(float64_to_)(f.value<float64>());
    value<SF_TYPE>() = converted;
    return *this;
}
template<> SoftFloatWrapper<N_SPECIALIZED>::SoftFloatWrapper(const SoftFloatWrapper<96>& f) {
    // Construct from the extended-precision wrapper (backed by SoftFloat's
    // floatx80 type): convert to this wrapper's precision, then store.
    const SF_TYPE converted = SF_APPEND(floatx80_to_)(f.value<floatx80>());
    value<SF_TYPE>() = converted;
}
template<> SoftFloatWrapper<N_SPECIALIZED>& SoftFloatWrapper<N_SPECIALIZED>::operator=(const SoftFloatWrapper<96>& f) {
    // Assignment from the extended-precision wrapper: same conversion as
    // the constructor.
    const SF_TYPE converted = SF_APPEND(floatx80_to_)(f.value<floatx80>());
    value<SF_TYPE>() = converted;
    return *this;
}
} // end of namespace
#endif

File diff suppressed because it is too large Load Diff