// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Binary to decimal floating point conversion.
// Algorithm:
// 1) store mantissa in multiprecision decimal
// 2) shift decimal by exponent
// 3) read digits out & format
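// For example, 6.25 = 25 * 2^-2: store 25 in the decimal, shift it right
// by 2 (divide by 4) to get digits "625" with the decimal point after the
// first digit, then read the digits out as "6.25".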
package strconv
import "math"
// TODO: move elsewhere?
type floatInfo struct {
mantbits uint
expbits uint
bias int
}
var float32info = floatInfo{23, 8, -127}
var float64info = floatInfo{52, 11, -1023}
// FormatFloat converts the floating-point number f to a string,
// according to the format fmt and precision prec. It rounds the
// result assuming that the original was obtained from a floating-point
// value of bitSize bits (32 for float32, 64 for float64).
//
// The format fmt is one of
// 'b' (-ddddp±ddd, a binary exponent),
// 'e' (-d.dddde±dd, a decimal exponent),
// 'E' (-d.ddddE±dd, a decimal exponent),
// 'f' (-ddd.dddd, no exponent),
// 'g' ('e' for large exponents, 'f' otherwise), or
// 'G' ('E' for large exponents, 'f' otherwise).
//
// The precision prec controls the number of digits
// (excluding the exponent) printed by the 'e', 'E', 'f', 'g', and 'G' formats.
// For 'e', 'E', and 'f' it is the number of digits after the decimal point.
// For 'g' and 'G' it is the total number of digits.
// The special precision -1 uses the smallest number of digits
// necessary such that ParseFloat will return f exactly.
func FormatFloat(f float64, fmt byte, prec, bitSize int) string {
return string(genericFtoa(make([]byte, 0, max(prec+4, 24)), f, fmt, prec, bitSize))
}
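// formatFloatExamples is an illustrative sketch, not part of the package API
// (the name is hypothetical). It shows typical FormatFloat calls; the strings
// in the comments follow from the format rules documented above.
func formatFloatExamples() []string {
	return []string{
		FormatFloat(3.1415926535, 'e', 6, 64), // "3.141593e+00"
		FormatFloat(2.5, 'f', 3, 64),          // "2.500"
		FormatFloat(0.00001, 'g', -1, 64),     // "1e-05" (shortest round-trip digits)
		FormatFloat(1024, 'b', -1, 64),        // "4503599627370496p-42"
	}
}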
// AppendFloat appends the string form of the floating-point number f,
// as generated by FormatFloat, to dst and returns the extended buffer.
func AppendFloat(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte {
return genericFtoa(dst, f, fmt, prec, bitSize)
}
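// appendFloatExample is an illustrative sketch (hypothetical name): AppendFloat
// writes into a caller-supplied buffer, avoiding the allocation FormatFloat makes.
func appendFloatExample() []byte {
	buf := []byte("x=")
	return AppendFloat(buf, 2.5, 'g', -1, 64) // "x=2.5"
}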
func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte {
var bits uint64
var flt *floatInfo
switch bitSize {
case 32:
bits = uint64(math.Float32bits(float32(val)))
flt = &float32info
case 64:
bits = math.Float64bits(val)
flt = &float64info
default:
panic("strconv: illegal AppendFloat/FormatFloat bitSize")
}
neg := bits>>(flt.expbits+flt.mantbits) != 0
exp := int(bits>>flt.mantbits) & (1<<flt.expbits - 1)
mant := bits & (uint64(1)<<flt.mantbits - 1)
switch exp {
case 1<<flt.expbits - 1:
// Inf, NaN
var s string
switch {
case mant != 0:
s = "NaN"
case neg:
s = "-Inf"
default:
s = "+Inf"
}
return append(dst, s...)
case 0:
// denormalized
exp++
default:
// add implicit top bit
mant |= uint64(1) << flt.mantbits
}
exp += flt.bias
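	// Worked illustration: for the float64 value 1.5 the bits are
	// 0x3FF8000000000000, so neg is false, the exponent field is 0x3FF (1023)
	// and the mantissa field is 1<<51. The default case above ORs in the
	// implicit bit, giving mant = 3<<51, and adding the bias of -1023 leaves
	// exp = 0, so the value is mant * 2^(exp-mantbits) = 3<<51 * 2^-52 = 1.5.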
// Pick off easy binary format.
if fmt == 'b' {
return fmtB(dst, neg, mant, exp, flt)
}
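	// optimize is a package-level flag declared elsewhere in this package;
	// it can be set to false to force the slow multiprecision path (e.g. for
	// testing).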
if !optimize {
return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
}
var digs decimalSlice
ok := false
// Negative precision means "only as much as needed to be exact."
shortest := prec < 0
if shortest {
// Try Grisu3 algorithm.
f := new(extFloat)
lower, upper := f.AssignComputeBounds(mant, exp, neg, flt)
var buf [32]byte
digs.d = buf[:]
ok = f.ShortestDecimal(&digs, &lower, &upper)
if !ok {
return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
}
// Precision for shortest representation mode.
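	// Illustration: the shortest digits for 0.25 are "25" with dp = 0, so
	// 'e' sets prec to 1 ("2.5e-01"), 'f' sets it to 2 ("0.25"), and
	// 'g' keeps both significant digits.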
switch fmt {
case 'e', 'E':
prec = max(digs.nd-1, 0)
case 'f':
prec = max(digs.nd-digs.dp, 0)
case 'g', 'G':
prec = digs.nd
}
} else if fmt != 'f' {
// Fixed number of digits.
digits := prec
switch fmt {
case 'e', 'E':
digits++
case 'g', 'G':
if prec == 0 {
prec = 1
}
digits = prec
}
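	// Illustration: 'e' with prec 6 needs 7 significant digits (one before
	// the decimal point plus six after it), while 'g' with prec 6 needs 6;
	// both fit well under the 15-digit fast-path limit below.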
if digits <= 15 {
// try fast algorithm when the number of digits is reasonable.
var buf [24]byte
digs.d = buf[:]
f := extFloat{mant, exp - int(flt.mantbits), neg}
ok = f.FixedDecimal(&digs, digits)
}
}
if !ok {
return bigFtoa(dst, prec, fmt, neg, mant, exp, flt)
}
return formatDigits(dst, shortest, neg, digs, prec, fmt)
}
// bigFtoa uses multiprecision computations to format a float.
func bigFtoa(dst []byte, prec int, fmt byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
d := new(decimal)
d.Assign(mant)
d.Shift(exp - int(flt.mantbits))
var digs decimalSlice
shortest := prec < 0
if shortest {
roundShortest(d, mant, exp, flt)
digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
// Precision for shortest representation mode.
switch fmt {
case 'e', 'E':
prec = digs.nd - 1
case 'f':
prec = max(digs.nd-digs.dp, 0)
case 'g', 'G':
prec = digs.nd
}
} else {
// Round appropriately.
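		// Illustration: formatting 123.456 with 'f' and prec 2 rounds the
		// decimal to d.dp+prec = 5 digits ("12346" with dp = 3), which fmtF
		// then prints as "123.46".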
switch fmt {
case 'e', 'E':
d.Round(prec + 1)
case 'f':
d.Round(d.dp + prec)
case 'g', 'G':
if prec == 0 {
prec = 1
}
d.Round(prec)
}
digs = decimalSlice{d: d.d[:], nd: d.nd, dp: d.dp}
}
return formatDigits(dst, shortest, neg, digs, prec, fmt)
}
func formatDigits(dst []byte, shortest bool, neg bool, digs decimalSlice, prec int, fmt byte) []byte {
switch fmt {
case 'e', 'E':
return fmtE(dst, neg, digs, prec, fmt)
case 'f':
return fmtF(dst, neg, digs, prec)
case 'g', 'G':
// trailing fractional zeros in 'e' form will be trimmed.
eprec := prec
if eprec > digs.nd && digs.nd >= digs.dp {
eprec = digs.nd
}
// %e is used if the exponent from the conversion
// is less than -4 or greater than or equal to the precision.
// if precision was the shortest possible, use precision 6 for this decision.
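		// Illustration: in shortest mode 0.0001 (exponent -4) stays in 'f'
		// form as "0.0001", while 0.00001 (exponent -5) switches to 'e'
		// form as "1e-05".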
if shortest {
eprec = 6
}
exp := digs.dp - 1
if exp < -4 || exp >= eprec {
if prec > digs.nd {
prec = digs.nd
}
return fmtE(dst, neg, digs, prec-1, fmt+'e'-'g')
}
if prec > digs.dp {
prec = digs.nd
}
return fmtF(dst, neg, digs, max(prec-digs.dp, 0))
}
// unknown format
return append(dst, '%', fmt)
}
// Round d (= mant * 2^exp) to the shortest number of digits
// that will let the original floating point value be precisely
// reconstructed. flt describes the original floating point type
// (float32 or float64).
func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) {
// If mantissa is zero, the number is zero; stop now.
if mant == 0 {
d.nd = 0
return
}
// Compute upper and lower such that any decimal number
// between upper and lower (possibly inclusive)
// will round to the original floating point number.
// We may see at once that the number is already shortest.
//
// Suppose d is not denormal, so that 2^exp <= d < 10^dp.
// The closest shorter number is at least 10^(dp-nd) away.
// The lower/upper bounds computed below are at distance
// at most 2^(exp-mantbits).
//
// So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits),
// or equivalently log2(10)*(dp-nd) > exp-mantbits.
// It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32).
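	// Illustration: for 0.5 the decimal is "5" (dp = 0, nd = 1) and
	// exp-mantbits = -53, so 332*(dp-nd) = -332 >= 100*(exp-mantbits) = -5300
	// and the single digit is already shortest.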
minexp := flt.bias + 1 // minimum possible exponent
if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) {
// The number is already shortest.
return
}
// d = mant << (exp - mantbits)
// Next highest floating point number is mant+1 << exp-mantbits.
// Our upper bound is halfway between, mant*2+1 << exp-mantbits-1.
upper := new(decimal)
upper.Assign(mant*2 + 1)
upper.Shift(exp - int(flt.mantbits) - 1)
// d = mant << (exp - mantbits)
// Next lowest floating point number is mant-1 << exp-mantbits,
// unless mant-1 drops the significant bit and exp is not the minimum exp,
// in which case the next lowest is mant*2-1 << exp-mantbits-1.
// Either way, call it mantlo << explo-mantbits.
// Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1.
var mantlo uint64
var explo int
if mant > 1<<flt.mantbits || exp == minexp {
mantlo = mant - 1
explo = exp
} else {
mantlo = mant*2 - 1
explo = exp - 1
}
lower := new(decimal)
lower.Assign(mantlo*2 + 1)
lower.Shift(explo - int(flt.mantbits) - 1)
// The upper and lower bounds are possible outputs only if
// the original mantissa is even, so that IEEE round-to-even
// would round to the original mantissa and not the neighbors.
inclusive := mant%2 == 0
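	// Illustration: for 1.0 (mant = 1<<52, exp = 0) the bounds are
	// upper = (2^53+1) * 2^-53 ≈ 1.0000000000000002 and
	// lower = (2^54-1) * 2^-54 ≈ 0.9999999999999999; the mantissa is even,
	// so both bounds are inclusive, and the loop below stops after a single
	// digit, leaving "1".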
// Now we can figure out the minimum number of digits required.
// Walk along until d has distinguished itself from upper and lower.
for i := 0; i < d.nd; i++ {
var l, m, u byte // lower, middle, upper digits
if i < lower.nd {
l = lower.d[i]
} else {
l = '0'
}
m = d.d[i]
if i < upper.nd {
u = upper.d[i]
} else {
u = '0'
}
// Okay to round down (truncate) if lower has a different digit
// or if lower is inclusive and is exactly the result of rounding down.
okdown := l != m || (inclusive && l == m && i+1 == lower.nd)
// Okay to round up if upper has a different digit and
// either upper is inclusive or upper is bigger than the result of rounding up.
okup := m != u && (inclusive || m+1 < u || i+1 < upper.nd)
// If it's okay to do either, then round to the nearest one.
// If it's okay to do only one, do it.
switch {
case okdown && okup:
d.Round(i + 1)
return
case okdown:
d.RoundDown(i + 1)
return
case okup:
d.RoundUp(i + 1)
return
}
}
}
type decimalSlice struct {
d []byte
nd, dp int
neg bool
}
// %e: -d.ddddde±dd
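// Illustration: digits "12345" with dp = 3 and prec = 6 format as "1.234500e+02".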
func fmtE(dst []byte, neg bool, d decimalSlice, prec int, fmt byte) []byte {
// sign
if neg {
dst = append(dst, '-')
}
// first digit
ch := byte('0')
if d.nd != 0 {
ch = d.d[0]
}
dst = append(dst, ch)
// .moredigits
if prec > 0 {
dst = append(dst, '.')
i := 1
m := min(d.nd, prec+1)
if i < m {
dst = append(dst, d.d[i:m]...)
i = m
}
for ; i <= prec; i++ {
dst = append(dst, '0')
}
}
// e±
dst = append(dst, fmt)
exp := d.dp - 1
if d.nd == 0 { // special case: 0 has exponent 0
exp = 0
}
if exp < 0 {
ch = '-'
exp = -exp
} else {
ch = '+'
}
dst = append(dst, ch)
// dd or ddd
switch {
case exp < 10:
dst = append(dst, '0', byte(exp)+'0')
case exp < 100:
dst = append(dst, byte(exp/10)+'0', byte(exp%10)+'0')
default:
dst = append(dst, byte(exp/100)+'0', byte(exp/10)%10+'0', byte(exp%10)+'0')
}
return dst
}
// %f: -ddddddd.ddddd
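// Illustration: digits "12345" with dp = 3 and prec = 2 format as "123.45";
// with dp = -1 and prec = 3 the same digits format as "0.012".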
func fmtF(dst []byte, neg bool, d decimalSlice, prec int) []byte {
// sign
if neg {
dst = append(dst, '-')
}
// integer, padded with zeros as needed.
if d.dp > 0 {
m := min(d.nd, d.dp)
dst = append(dst, d.d[:m]...)
for ; m < d.dp; m++ {
dst = append(dst, '0')
}
} else {
dst = append(dst, '0')
}
// fraction
if prec > 0 {
dst = append(dst, '.')
for i := 0; i < prec; i++ {
ch := byte('0')
if j := d.dp + i; 0 <= j && j < d.nd {
ch = d.d[j]
}
dst = append(dst, ch)
}
}
return dst
}
// %b: -ddddddddp±ddd
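// Illustration: 6.25 arrives as mant = 25<<48 with exp = 2 (float64), so after
// exp -= mantbits the output is "7036874417766400p-50".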
func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte {
var buf [50]byte
w := len(buf)
exp -= int(flt.mantbits)
esign := byte('+')
if exp < 0 {
esign = '-'
exp = -exp
}
n := 0
for exp > 0 || n < 1 {
n++
w--
buf[w] = byte(exp%10 + '0')
exp /= 10
}
w--
buf[w] = esign
w--
buf[w] = 'p'
n = 0
for mant > 0 || n < 1 {
n++
w--
buf[w] = byte(mant%10 + '0')
mant /= 10
}
if neg {
w--
buf[w] = '-'
}
return append(dst, buf[w:]...)
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}