// NewHeatMap creates a new heat map plotter for the given data,
// using the provided palette. If g has Min and Max methods that return
// a float, those returned values are used to set the respective HeatMap
// fields.
func NewHeatMap(g GridXYZ, p palette.Palette) *HeatMap {
	var min, max float64
	type minMaxer interface {
		Min() float64
		Max() float64
	}
	switch g := g.(type) {
	case minMaxer:
		min, max = g.Min(), g.Max()
	default:
		min, max = math.Inf(1), math.Inf(-1)
		c, r := g.Dims()
		for i := 0; i < c; i++ {
			for j := 0; j < r; j++ {
				v := g.Z(i, j)
				if math.IsNaN(v) {
					continue
				}
				min = math.Min(min, v)
				max = math.Max(max, v)
			}
		}
	}

	return &HeatMap{
		GridXYZ: g,
		Palette: p,
		Min:     min,
		Max:     max,
	}
}
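// A minimal usage sketch (gridData is hypothetical, not part of the original
// source; it merely satisfies GridXYZ without Min/Max methods, so NewHeatMap
// takes the default branch, scanning every cell and skipping the NaN).
type gridData struct{ z [][]float64 }

func (g gridData) Dims() (c, r int)   { return len(g.z), len(g.z[0]) }
func (g gridData) Z(c, r int) float64 { return g.z[c][r] }
func (g gridData) X(c int) float64    { return float64(c) }
func (g gridData) Y(r int) float64    { return float64(r) }

func ExampleNewHeatMap() {
	g := gridData{z: [][]float64{{0, 1, 2}, {3, math.NaN(), 5}}}
	h := NewHeatMap(g, palette.Heat(12, 1))
	fmt.Println(h.Min, h.Max)
	// Output: 0 5
}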
// TestFloatCmpSpecialValues tests that Cmp produces the correct results for
// combinations of zero (±0), finite (±1 and ±2.71828), and infinite (±Inf)
// operands.
func TestFloatCmpSpecialValues(t *testing.T) {
	zero := 0.0
	args := []float64{math.Inf(-1), -2.71828, -1, -zero, zero, 1, 2.71828, math.Inf(1)}
	xx := new(Float)
	yy := new(Float)
	for i := 0; i < 4; i++ {
		for _, x := range args {
			xx.SetFloat64(x)
			// check conversion is correct
			// (no need to do this for y, since we see exactly the
			// same values there)
			if got, acc := xx.Float64(); got != x || acc != Exact {
				t.Errorf("Float(%g) == %g (%s)", x, got, acc)
			}
			for _, y := range args {
				yy.SetFloat64(y)
				got := xx.Cmp(yy)
				want := 0
				switch {
				case x < y:
					want = -1
				case x > y:
					want = +1
				}
				if got != want {
					t.Errorf("(%g).Cmp(%g) = %v; want %v", x, y, got, want)
				}
			}
		}
	}
}
// Train computes and stores the bin values
// for the training instances.
func (b *BinningFilter) Train() error {
	as := b.getAttributeSpecs()
	// Set up the AttributeSpecs, and seed the min/max values so the first
	// observed value replaces them.
	for attr := range b.attrs {
		if !b.attrs[attr] {
			continue
		}
		b.minVals[attr] = math.Inf(1)
		b.maxVals[attr] = math.Inf(-1)
	}
	err := b.train.MapOverRows(as, func(row [][]byte, rowNo int) (bool, error) {
		for i, a := range row {
			attr := as[i].GetAttribute()
			attrf := attr.(*base.FloatAttribute)
			val := attrf.GetFloatFromSysVal(a)
			if val > b.maxVals[attr] {
				b.maxVals[attr] = val
			}
			if val < b.minVals[attr] {
				b.minVals[attr] = val
			}
		}
		return true, nil
	})
	if err != nil {
		return fmt.Errorf("training error: %s", err)
	}
	b.trained = true
	return nil
}
// generateValidatedLengthExample generates a random size array of examples based on what's given.
func (eg *exampleGenerator) generateValidatedLengthExample() interface{} {
	minlength, maxlength := math.Inf(1), math.Inf(-1)
	if eg.a.Validation != nil {
		if eg.a.Validation.MinLength != nil {
			minlength = float64(*eg.a.Validation.MinLength)
		}
		if eg.a.Validation.MaxLength != nil {
			maxlength = float64(*eg.a.Validation.MaxLength)
		}
	}

	count := 0
	if math.IsInf(minlength, 1) {
		count = int(maxlength) - (eg.r.Int() % 3)
	} else if math.IsInf(maxlength, -1) {
		count = int(minlength) + (eg.r.Int() % 3)
	} else if minlength < maxlength {
		count = int(minlength) + (eg.r.Int() % int(maxlength-minlength))
	} else if minlength == maxlength {
		count = int(minlength)
	} else {
		panic("Validation: MinLength > MaxLength")
	}

	if !eg.a.Type.IsArray() {
		return eg.r.faker.Characters(count)
	}
	res := make([]interface{}, count)
	for i := 0; i < count; i++ {
		res[i] = eg.a.Type.ToArray().ElemType.GenerateExample(eg.r)
	}
	return res
}
// checkIsBestApprox checks that f is the best possible float64
// approximation of r.
// Returns true on success.
func checkIsBestApprox(t *testing.T, f float64, r *Rat) bool {
	if math.Abs(f) >= math.MaxFloat64 {
		// Cannot check +Inf, -Inf, nor the float next to them (MaxFloat64).
		// But we have tests for these special cases.
		return true
	}

	// r must be strictly between f0 and f1, the floats bracketing f.
	f0 := math.Nextafter(f, math.Inf(-1))
	f1 := math.Nextafter(f, math.Inf(+1))

	// For f to be correct, r must be closer to f than to f0 or f1.
	df := delta(r, f)
	df0 := delta(r, f0)
	df1 := delta(r, f1)

	if df.Cmp(df0) > 0 {
		t.Errorf("Rat(%v).Float64() = %g (%b), but previous float64 %g (%b) is closer", r, f, f, f0, f0)
		return false
	}
	if df.Cmp(df1) > 0 {
		t.Errorf("Rat(%v).Float64() = %g (%b), but next float64 %g (%b) is closer", r, f, f, f1, f1)
		return false
	}
	if df.Cmp(df0) == 0 && !isEven(f) {
		t.Errorf("Rat(%v).Float64() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f0, f0)
		return false
	}
	if df.Cmp(df1) == 0 && !isEven(f) {
		t.Errorf("Rat(%v).Float64() = %g (%b); halfway should have rounded to %g (%b) instead", r, f, f, f1, f1)
		return false
	}
	return true
}
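// checkIsBestApprox relies on two helpers not shown in this excerpt. A
// plausible sketch consistent with how they are used above (an assumption,
// mirroring the math/big test helpers): delta is the absolute difference
// |r - f| as a Rat, and isEven reports whether the mantissa's lowest bit is
// zero, which is what round-to-even ties resolve to.
func delta(r *Rat, f float64) *Rat {
	d := new(Rat).Sub(r, new(Rat).SetFloat64(f))
	return d.Abs(d)
}

func isEven(f float64) bool { return math.Float64bits(f)&1 == 0 }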
func TestAllSetDefaults(t *testing.T) {
	// Exercise SetDefaults with all scalar field types.
	m := &Defaults{
		// NaN != NaN, so override that here.
		F_Nan: Float32(1.7),
	}
	expected := &Defaults{
		F_Bool:    Bool(true),
		F_Int32:   Int32(32),
		F_Int64:   Int64(64),
		F_Fixed32: Uint32(320),
		F_Fixed64: Uint64(640),
		F_Uint32:  Uint32(3200),
		F_Uint64:  Uint64(6400),
		F_Float:   Float32(314159),
		F_Double:  Float64(271828),
		F_String:  String(`hello, "world!"` + "\n"),
		F_Bytes:   []byte("Bignose"),
		F_Sint32:  Int32(-32),
		F_Sint64:  Int64(-64),
		F_Enum:    NewDefaults_Color(Defaults_GREEN),
		F_Pinf:    Float32(float32(math.Inf(1))),
		F_Ninf:    Float32(float32(math.Inf(-1))),
		F_Nan:     Float32(1.7),
	}
	SetDefaults(m)
	if !Equal(m, expected) {
		t.Errorf(" got %v\nwant %v", m, expected)
	}
}
func TestNewFromFloat(t *testing.T) {
	var err float64
	for f, s := range testTable {
		d := NewFromFloat(f)
		if d.String() != s {
			err++
			// t.Errorf("expected %s, got %s (%d, %d)",
			//	s, d.String(), d.compact, d.scale)
		}
	}

	// Some margin of error is acceptable when converting from
	// a float. On a table of roughly 9,000 entries an acceptable
	// margin of error is around 450.
	// Currently, using Gaussian/banker's rounding our margin
	// of error is roughly 215 per 9,000 entries, for a rate of
	// around 2.3%.
	if err >= 0.05*float64(len(testTable)) {
		t.Errorf("expected error rate to be < 5%% of table, got %.f", err)
	}

	shouldPanicOn := []float64{
		math.NaN(),
		math.Inf(1),
		math.Inf(-1),
	}

	for _, n := range shouldPanicOn {
		var d *Decimal
		if !didPanic(func() { d = NewFromFloat(n) }) {
			t.Fatalf("expected panic when creating a Decimal from %v, got %v instead", n, d.String())
		}
	}
}
func (b *Button) Initialize() {
	b.Foundation.Initialize()
	b.DrawOp = draw.Over

	b.Label = NewLabel(b.Size, LabelConfig{
		Text:     "",
		FontSize: 12,
		Color:    color.Black,
	})
	b.AddBlock(&b.Label.Block)

	b.Clickers = map[Clicker]bool{}
	b.AddClicker = make(chan Clicker, 1)
	b.RemoveClicker = make(chan Clicker, 1)

	var cs geom.Coord
	cs.X, cs.Y = b.Bounds().Size()
	sh := uik.SizeHint{
		MinSize:       cs,
		PreferredSize: cs,
		MaxSize:       geom.Coord{math.Inf(1), math.Inf(1)},
	}
	b.SetSizeHint(sh)

	b.setConfig = make(chan ButtonConfig, 1)
	b.getConfig = make(chan ButtonConfig, 1)
}
// generateValidatedLengthExample generates a random size array of examples based on what's given.
func (eg *exampleGenerator) generateValidatedLengthExample() interface{} {
	minlength, maxlength := math.Inf(1), math.Inf(-1)
	for _, v := range eg.a.Validations {
		switch actual := v.(type) {
		case *dslengine.MinLengthValidationDefinition:
			minlength = math.Min(minlength, float64(actual.MinLength))
			maxlength = math.Max(maxlength, float64(actual.MinLength))
		case *dslengine.MaxLengthValidationDefinition:
			minlength = math.Min(minlength, float64(actual.MaxLength))
			maxlength = math.Max(maxlength, float64(actual.MaxLength))
		}
	}

	count := 0
	if math.IsInf(minlength, 1) {
		count = int(maxlength) - (eg.r.Int() % 3)
	} else if math.IsInf(maxlength, -1) {
		count = int(minlength) + (eg.r.Int() % 3)
	} else if minlength < maxlength {
		count = int(minlength) + (eg.r.Int() % int(maxlength-minlength))
	} else if minlength == maxlength {
		count = int(minlength)
	} else {
		panic("Validation: MinLength > MaxLength")
	}

	if !eg.a.Type.IsArray() {
		return eg.r.faker.Characters(count)
	}
	res := make([]interface{}, count)
	for i := 0; i < count; i++ {
		res[i] = eg.a.Type.ToArray().ElemType.GenerateExample(eg.r)
	}
	return res
}
// Merge merges the data of two Stats objects.
func (s Stats) Merge(t Stats) Stats {
	if s.count == 0 {
		s.max = math.Inf(-1)
		s.min = math.Inf(+1)
	}

	delta := t.mean - s.mean
	newcount := t.count + s.count

	// max & min
	s.max = math.Max(s.max, t.max)
	s.min = math.Min(s.min, t.min)

	// mean
	s.mean += delta * (t.count / newcount)

	// sum of squares
	s.sumsq += t.sumsq
	s.sumsq += delta * delta * (t.count * s.count / newcount)

	// count
	s.count = newcount

	return s
}
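// A worked sanity check (hypothetical test, assuming the unexported fields
// above are reachable from a package-internal test): merging the moments of
// {1, 2} and {3, 4, 5} must match the moments of {1, 2, 3, 4, 5} computed
// directly, namely mean 3 and sum of squared deviations 10.
func TestStatsMerge(t *testing.T) {
	a := Stats{count: 2, mean: 1.5, sumsq: 0.5, min: 1, max: 2}
	b := Stats{count: 3, mean: 4, sumsq: 2, min: 3, max: 5}
	m := a.Merge(b)
	if m.count != 5 || m.mean != 3 || m.sumsq != 10 || m.min != 1 || m.max != 5 {
		t.Errorf("got %+v, want count=5 mean=3 sumsq=10 min=1 max=5", m)
	}
}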
func TestRTT_getDatacenterDistance(t *testing.T) {
	s := newMockServer()

	// The serfer's own DC is always 0 ms away.
	if dist, err := getDatacenterDistance(s, "dc0"); err != nil || dist != 0.0 {
		t.Fatalf("bad: %v err: %v", dist, err)
	}

	// Check a DC with no coordinates, which should give positive infinity.
	if dist, err := getDatacenterDistance(s, "dcX"); err != nil || dist != math.Inf(1) {
		t.Fatalf("bad: %v err: %v", dist, err)
	}

	// Similar for a totally unknown DC.
	if dist, err := getDatacenterDistance(s, "acdc"); err != nil || dist != math.Inf(1) {
		t.Fatalf("bad: %v err: %v", dist, err)
	}

	// Check the trivial median case (just one node).
	if dist, err := getDatacenterDistance(s, "dc2"); err != nil || dist != 0.002 {
		t.Fatalf("bad: %v err: %v", dist, err)
	}

	// Check the more interesting median case, note that there's a mystery
	// node4 in there that should be excluded to make the distances sort
	// like this:
	//
	// [0] node3 (0.005), [1] node1 (0.007), [2] node2 (0.008)
	//
	// So the median should be at index 3 / 2 = 1 -> 0.007.
	if dist, err := getDatacenterDistance(s, "dc1"); err != nil || dist != 0.007 {
		t.Fatalf("bad: %v err: %v", dist, err)
	}
}
func TestMinimalSurface(t *testing.T) {
	for _, size := range [][2]int{
		{20, 30},
		{30, 30},
		{50, 40},
	} {
		f := NewMinimalSurface(size[0], size[1])
		x0 := f.InitX()
		grad := make([]float64, len(x0))
		f.Grad(grad, x0)
		fdGrad := fd.Gradient(nil, f.Func, x0, &fd.Settings{Formula: fd.Central})

		// Test that the numerical and analytical gradients agree.
		dist := floats.Distance(grad, fdGrad, math.Inf(1))
		if dist > 1e-9 {
			t.Errorf("grid %v x %v: numerical and analytical gradient do not match. |fdGrad - grad|_∞ = %v",
				size[0], size[1], dist)
		}

		// Test that the gradient at the minimum is small enough.
		// In some sense this test is not completely correct because ExactX
		// returns the exact solution to the continuous problem projected on the
		// grid, not the exact solution to the discrete problem which we are
		// solving. This is the reason why a relatively loose tolerance 1e-4
		// must be used.
		xSol := f.ExactX()
		f.Grad(grad, xSol)
		norm := floats.Norm(grad, math.Inf(1))
		if norm > 1e-4 {
			t.Errorf("grid %v x %v: gradient at the minimum not small enough. |grad|_∞ = %v",
				size[0], size[1], norm)
		}
	}
}
func TestScrubValues(t *testing.T) {
	dummy := Converter{
		tracker: new(tracker),
	}

	epoch := time.Unix(0, 0)
	simple := []tsm1.Value{tsm1.NewValue(epoch, 1.0)}

	for _, tt := range []struct {
		input, expected []tsm1.Value
	}{
		{
			input:    simple,
			expected: simple,
		},
		{
			input:    []tsm1.Value{simple[0], tsm1.NewValue(epoch, math.NaN())},
			expected: simple,
		},
		{
			input:    []tsm1.Value{simple[0], tsm1.NewValue(epoch, math.Inf(-1))},
			expected: simple,
		},
		{
			input:    []tsm1.Value{simple[0], tsm1.NewValue(epoch, math.Inf(1)), tsm1.NewValue(epoch, math.NaN())},
			expected: simple,
		},
	} {
		out := dummy.scrubValues(tt.input)
		if !reflect.DeepEqual(out, tt.expected) {
			t.Errorf("Failed to scrub '%s': Got '%s', Expected '%s'", pretty(tt.input), pretty(out), pretty(tt.expected))
		}
	}
}
// Normalize returns all the values of the given slice normalized, where the
// formula applied to each element is: (x - avg) / (max - min). If all the
// elements in the slice have the same value, or the slice is empty, the
// slice can't be normalized, and valid is false.
func Normalize(values []float64) (norm []float64, valid bool) {
	avg := 0.0
	max := math.Inf(-1)
	min := math.Inf(1)
	for _, val := range values {
		avg += val
		if val < min {
			min = val
		}
		if val > max {
			max = val
		}
	}
	if max == min || len(values) == 0 {
		valid = false
		return
	}
	valid = true
	avg /= float64(len(values))
	for _, val := range values {
		norm = append(norm, (val-avg)/(max-min))
	}
	return
}
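// A minimal usage sketch (hypothetical caller): values are centered on their
// mean and scaled by the range, so {1, 2, 3} maps to {-0.5, 0, 0.5}, while a
// constant slice is rejected because max == min.
func ExampleNormalize() {
	norm, valid := Normalize([]float64{1, 2, 3})
	fmt.Println(norm, valid)
	_, valid = Normalize([]float64{7, 7, 7})
	fmt.Println(valid)
	// Output:
	// [-0.5 0 0.5] true
	// false
}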
func TestScaledUpHalfKStandardWeibullProb(t *testing.T) {
	pts := []univariateProbPoint{
		{
			loc:     0,
			prob:    math.Inf(1),
			cumProb: 0,
			logProb: math.Inf(1),
		},
		{
			loc:     -1,
			prob:    0,
			cumProb: 0,
			logProb: 0,
		},
		{
			loc:     1,
			prob:    0.180436508682207,
			cumProb: 0.558022622759326,
			logProb: -1.712376315541750,
		},
		{
			loc:     20,
			prob:    0.002369136850928,
			cumProb: 0.974047406098605,
			logProb: -6.045229588092130,
		},
	}
	testDistributionProbs(t, Weibull{K: 0.5, Lambda: 1.5}, "0.5K 1.5λ Weibull", pts)
}
// ChooseInsertionPoint starts at the receiver node, finds the best node at
// which to insert 'newNode', and returns it.
func (node *RTreeNode) ChooseInsertionPoint(newNode *RTreeNode) *RTreeNode {
	for {
		if node.leaf {
			return node
		}

		var selectedNode *RTreeNode
		minArea := float32(math.Inf(1))
		minEnlargement := float32(math.Inf(1))

		// The best insertion point is the one that minimizes the enlargement
		// of the target subtree's bounding box. Or, if enlargements are the
		// same, the smaller of the resulting areas.
		for _, child := range node.children {
			area := child.Bounds.Area()
			containingBox := newNode.Bounds.Union(&child.Bounds)
			containingArea := containingBox.Area()
			enlargement := containingArea - area
			if enlargement < minEnlargement || (enlargement == minEnlargement && area < minArea) {
				minEnlargement = enlargement
				minArea = Min(area, minArea)
				selectedNode = child
			}
		}
		node = selectedNode
	}
}
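// Min is referenced above but not defined in this excerpt. A plausible
// helper consistent with the float32 areas used by this R-tree (an
// assumption, not the original source):
func Min(a, b float32) float32 {
	if a < b {
		return a
	}
	return b
}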
func TestScaledDownHalfKStandardWeibullProb(t *testing.T) {
	pts := []univariateProbPoint{
		{
			loc:     0,
			prob:    math.Inf(1),
			cumProb: 0,
			logProb: math.Inf(1),
		},
		{
			loc:     -1,
			prob:    0,
			cumProb: 0,
			logProb: 0,
		},
		{
			loc:     1,
			prob:    0.171909491538362,
			cumProb: 0.756883265565786,
			logProb: -1.760787152653070,
		},
		{
			loc:     20,
			prob:    0.000283302579100,
			cumProb: 0.998208237166091,
			logProb: -8.168995047393730,
		},
	}
	testDistributionProbs(t, Weibull{K: 0.5, Lambda: 0.5}, "0.5K 0.5λ Weibull", pts)
}
func percentile(data []float64, percent float64, interpolate bool) float64 {
	if len(data) == 0 || percent < 0 || percent > 100 {
		return math.NaN()
	}
	if len(data) == 1 {
		return data[0]
	}

	// k is the fractional index of the requested percentile; the answer lies
	// between the elements at floor(k) and ceil(k) of the sorted data, which
	// are the two largest of the smallest `length` elements selected below.
	k := (float64(len(data)-1) * percent) / 100
	length := int(math.Ceil(k)) + 1
	quickselect.Float64QuickSelect(data, length)
	top, secondTop := math.Inf(-1), math.Inf(-1)
	for _, val := range data[0:length] {
		if val > top {
			secondTop = top
			top = val
		} else if val > secondTop {
			secondTop = val
		}
	}

	remainder := k - float64(int(k))
	if remainder == 0 || !interpolate {
		return top
	}
	return (top * remainder) + (secondTop * (1 - remainder))
}
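// A quick worked example (hypothetical caller; note that percentile reorders
// its input in place via quickselect, so pass a copy if order matters). For
// {1, 2, 3, 4, 5} at the 90th percentile, k = 4*0.9 = 3.6, which interpolates
// between 4 and 5 to give 4.6.
func Example_percentile() {
	data := []float64{5, 2, 4, 1, 3}
	fmt.Printf("%.1f\n", percentile(data, 90, true))
	// Output: 4.6
}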
func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) {
	switch d.vd {
	case bincVdSpecial:
		d.bdRead = false
		switch d.vs {
		case bincSpNan:
			return math.NaN()
		case bincSpPosInf:
			return math.Inf(1)
		case bincSpZeroFloat, bincSpZero:
			return
		case bincSpNegInf:
			return math.Inf(-1)
		default:
			decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
		}
	case bincVdFloat:
		f = d.decFloat()
	default:
		_, i, _ := d.decIntAny()
		f = float64(i)
	}
	checkOverflowFloat32(f, chkOverflow32)
	d.bdRead = false
	return
}
// minCost finds the minimum possible cost this cost model can return for
// valid length and distance symbols.
func (costModel costModelFun) minCost(costContext interface{}) float64 {
	var minCost float64

	// bestPair has the lowest cost in the cost model.
	var bestPair, pair lz77Pair

	// Find the lowest-cost literal/length symbol.
	pair.dist = 1
	minCost = math.Inf(1)
	for pair.litLen = uint16(3); pair.litLen < 259; pair.litLen++ {
		c := costModel(pair, costContext)
		if c < minCost {
			bestPair.litLen = pair.litLen
			minCost = c
		}
	}

	// Find the lowest-cost distance symbol. dSymbolTable holds the first
	// distance that gets each distance symbol in the deflate specification.
	// Only distinct symbols affect the cost model, so only these need to be
	// checked. See RFC 1951 section 3.2.5. Compressed blocks (length and
	// distance codes).
	// TODO: try using bestPair.litLen instead of 3
	pair.litLen = 3
	minCost = math.Inf(1)
	for i := 0; i < 30; i++ {
		pair.dist = dSymbolTable[i]
		c := costModel(pair, costContext)
		if c < minCost {
			bestPair.dist = pair.dist
			minCost = c
		}
	}

	return costModel(bestPair, costContext)
}
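// dSymbolTable is referenced above but not defined in this excerpt. A sketch
// under the assumption that it mirrors RFC 1951 section 3.2.5, where the 30
// distance codes begin at these distances:
var dSymbolTable = [30]uint16{
	1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
	257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
	8193, 12289, 16385, 24577,
}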
func (t *MsgpackTest) TestFloat64(c *C) {
	table := []struct {
		v float64
		b []byte
	}{
		{.1, []byte{0xcb, 0x3f, 0xb9, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a}},
		{.2, []byte{0xcb, 0x3f, 0xc9, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a}},
		{-.1, []byte{0xcb, 0xbf, 0xb9, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a}},
		{-.2, []byte{0xcb, 0xbf, 0xc9, 0x99, 0x99, 0x99, 0x99, 0x99, 0x9a}},
		{math.Inf(1), []byte{0xcb, 0x7f, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
		{math.Inf(-1), []byte{0xcb, 0xff, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
		{math.MaxFloat64, []byte{0xcb, 0x7f, 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
		{math.SmallestNonzeroFloat64, []byte{0xcb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}},
	}
	for _, r := range table {
		c.Assert(t.enc.Encode(r.v), IsNil)
		c.Assert(t.buf.Bytes(), DeepEquals, r.b, Commentf("err encoding %v", r.v))
		var v float64
		c.Assert(t.dec.Decode(&v), IsNil)
		c.Assert(v, Equals, r.v)

		c.Assert(t.enc.Encode(r.v), IsNil)
		iface, err := t.dec.DecodeInterface()
		c.Assert(err, IsNil)
		c.Assert(iface, Equals, r.v)
	}

	in := math.NaN()
	c.Assert(t.enc.Encode(in), IsNil)
	var out float64
	c.Assert(t.dec.Decode(&out), IsNil)
	c.Assert(math.IsNaN(out), Equals, true)
}
func (t *MsgpackTest) TestFloat32(c *C) {
	table := []struct {
		v float32
		b []byte
	}{
		{.1, []byte{0xca, 0x3d, 0xcc, 0xcc, 0xcd}},
		{.2, []byte{0xca, 0x3e, 0x4c, 0xcc, 0xcd}},
		{-.1, []byte{0xca, 0xbd, 0xcc, 0xcc, 0xcd}},
		{-.2, []byte{0xca, 0xbe, 0x4c, 0xcc, 0xcd}},
		{float32(math.Inf(1)), []byte{0xca, 0x7f, 0x80, 0x00, 0x00}},
		{float32(math.Inf(-1)), []byte{0xca, 0xff, 0x80, 0x00, 0x00}},
		{math.MaxFloat32, []byte{0xca, 0x7f, 0x7f, 0xff, 0xff}},
		{math.SmallestNonzeroFloat32, []byte{0xca, 0x00, 0x00, 0x00, 0x01}},
	}
	for _, r := range table {
		c.Assert(t.enc.Encode(r.v), IsNil)
		c.Assert(t.buf.Bytes(), DeepEquals, r.b, Commentf("err encoding %v", r.v))
		var v float32
		c.Assert(t.dec.Decode(&v), IsNil)
		c.Assert(v, Equals, r.v)

		c.Assert(t.enc.Encode(r.v), IsNil)
		iface, err := t.dec.DecodeInterface()
		c.Assert(err, IsNil)
		c.Assert(iface, Equals, r.v)
	}

	in := float32(math.NaN())
	c.Assert(t.enc.Encode(in), IsNil)
	var out float32
	c.Assert(t.dec.Decode(&out), IsNil)
	c.Assert(math.IsNaN(float64(out)), Equals, true)
}
func TestLogistic(t *testing.T) {
	for _, x := range []float64{1e-300, 1e-15, 1e-7, .2, .3, .5} {
		for _, x := range []float64{x, -x} {
			p := Logistic(x)
			logit := Logit(p)
			if err := math.Abs(logit - x); err > 3e-16 {
				t.Errorf("mismatch: %g differs from %g by %g", Logit(p), x, err)
			}
		}
	}
	for _, c := range []struct{ p, logit float64 }{
		{1, math.Inf(1)},
		{0, math.Inf(-1)},
	} {
		if logit := Logit(c.p); logit != c.logit {
			t.Errorf("Logit error: got %g, wanted %g", logit, c.logit)
		}
	}
	for _, p := range []float64{-1, -1e300, 1 + 1e300, 2} {
		if !math.IsNaN(Logit(p)) {
			t.Errorf("expected NaN for Logit(%g), got %g", p, Logit(p))
		}
	}
}
func init() {
	__NaN__ = math.NaN()
	__PositiveInfinity__ = math.Inf(+1)
	__NegativeInfinity__ = math.Inf(-1)
	__PositiveZero__ = 0
	// Setting only the sign bit (bit 63) of an otherwise-zero float64
	// produces negative zero, which cannot be written as a literal.
	__NegativeZero__ = math.Float64frombits(0 | (1 << 63))
}
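// A small demonstration of the distinction (standard library calls only):
// negative zero compares equal to zero yet carries the sign bit, which is
// why it must be constructed through Float64frombits above.
func ExampleNegativeZero() {
	negZero := math.Float64frombits(1 << 63)
	fmt.Println(negZero == 0, math.Signbit(negZero), math.Signbit(0))
	// Output: true true false
}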
func TestIsFinite(t *testing.T) {
	finites := []float64{
		1.0 / 3,
		4891559871276714924261e+222,
		math.MaxFloat64,
		math.SmallestNonzeroFloat64,
		-math.MaxFloat64,
		-math.SmallestNonzeroFloat64,
	}
	for _, f := range finites {
		if !isFinite(f) {
			t.Errorf("!IsFinite(%g (%b))", f, f)
		}
	}

	nonfinites := []float64{
		math.NaN(),
		math.Inf(-1),
		math.Inf(+1),
	}
	for _, f := range nonfinites {
		if isFinite(f) {
			t.Errorf("IsFinite(%g (%b))", f, f)
		}
	}
}
// NilRect returns a rectangle that contains nothing: its Min is at +Inf and
// its Max at -Inf, so the usual min/max bounds accumulation expands it to
// fit the first point added.
func NilRect() (r Rect) {
	r.Min.X = math.Inf(1)
	r.Min.Y = math.Inf(1)
	r.Max.X = math.Inf(-1)
	r.Max.Y = math.Inf(-1)
	return
}
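// A minimal sketch of the accumulation pattern NilRect enables (ExpandToFit
// is a hypothetical helper, not part of the original source): starting from
// NilRect, the min/max updates make the first point define the bounds, with
// no special empty-rect case needed.
func ExpandToFit(r Rect, x, y float64) Rect {
	r.Min.X = math.Min(r.Min.X, x)
	r.Min.Y = math.Min(r.Min.Y, y)
	r.Max.X = math.Max(r.Max.X, x)
	r.Max.Y = math.Max(r.Max.Y, y)
	return r
}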
func special(s string) (f float64, ok bool) {
	if len(s) == 0 {
		return
	}
	switch s[0] {
	default:
		return
	case '+':
		if equalIgnoreCase(s, "+inf") || equalIgnoreCase(s, "+infinity") {
			return math.Inf(1), true
		}
	case '-':
		if equalIgnoreCase(s, "-inf") || equalIgnoreCase(s, "-infinity") {
			return math.Inf(-1), true
		}
	case 'n', 'N':
		if equalIgnoreCase(s, "nan") {
			return math.NaN(), true
		}
	case 'i', 'I':
		if equalIgnoreCase(s, "inf") || equalIgnoreCase(s, "infinity") {
			return math.Inf(1), true
		}
	}
	return
}
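// A few illustrative calls (hypothetical; equalIgnoreCase is assumed to be a
// case-insensitive string comparison defined alongside special):
//
//	special("inf")       // returns (+Inf, true)
//	special("-Infinity") // returns (-Inf, true)
//	special("NAN")       // returns (NaN, true)
//	special("nanny")     // returns (0, false): not an exact match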
func TestHalfKStandardWeibullProb(t *testing.T) {
	pts := []univariateProbPoint{
		{
			loc:     0,
			prob:    math.Inf(1),
			cumProb: 0,
			logProb: math.Inf(1),
		},
		{
			loc:     -1,
			prob:    0,
			cumProb: 0,
			logProb: 0,
		},
		{
			loc:     1,
			prob:    0.183939720585721,
			cumProb: 0.632120558828558,
			logProb: -1.693147180559950,
		},
		{
			loc:     20,
			prob:    0.001277118038048,
			cumProb: 0.988577109006533,
			logProb: -6.663149272336520,
		},
	}
	testDistributionProbs(t, Weibull{K: 0.5, Lambda: 1}, "0.5K Standard Weibull", pts)
}
func (d *bincDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
	if !d.bdRead {
		d.readNextBd()
	}
	vd, vs := d.vd, d.vs
	if vd == bincVdSpecial {
		d.bdRead = false
		if vs == bincSpNan {
			return math.NaN()
		} else if vs == bincSpPosInf {
			return math.Inf(1)
		} else if vs == bincSpZeroFloat || vs == bincSpZero {
			return
		} else if vs == bincSpNegInf {
			return math.Inf(-1)
		} else {
			d.d.errorf("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
			return
		}
	} else if vd == bincVdFloat {
		f = d.decFloat()
	} else {
		f = float64(d.DecodeInt(64))
	}
	if chkOverflow32 && chkOvf.Float32(f) {
		d.d.errorf("binc: float32 overflow: %v", f)
		return
	}
	d.bdRead = false
	return
}
func init() {
	INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
	NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
	NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
	ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
	NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
}