func ExampleDistanceMatrix_Symmetric() {
	d1 := cluster.DistanceMatrix{
		{0, 3}, // true
		{3, 0},
	}
	d2 := cluster.DistanceMatrix{
		{0, 3}, // false
		{7, 0},
	}
	d3 := cluster.DistanceMatrix{
		{0, math.NaN()}, // false (NaNs do not compare equal)
		{math.NaN(), 0},
	}
	d4 := cluster.DistanceMatrix{
		{0, 3}, // true (diagonal is not checked)
		{3, math.NaN()},
	}
	fmt.Println(d1.Symmetric())
	fmt.Println(d2.Symmetric())
	fmt.Println(d3.Symmetric())
	fmt.Println(d4.Symmetric())
	// Output:
	// true
	// false
	// false
	// true
}
func TestNewPointNaN(t *testing.T) {
	test(t, `cpu value=NaN 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": math.NaN(),
			},
			time.Unix(1, 0)),
	)
	test(t, `cpu value=nAn 1000000000`,
		tsdb.NewPoint(
			"cpu",
			tsdb.Tags{},
			tsdb.Fields{
				"value": math.NaN(),
			},
			time.Unix(1, 0)),
	)
	test(t, `nan value=NaN`,
		tsdb.NewPoint(
			"nan",
			tsdb.Tags{},
			tsdb.Fields{
				"value": math.NaN(),
			},
			time.Unix(0, 0)),
	)
}
func TestDot(t *testing.T) {
	for _, test := range []struct {
		n       int
		x, y    []float64
		indices []int
		want    float64
	}{
		{
			n:       5,
			x:       []float64{1, 2, 3},
			indices: []int{0, 2, 4},
			y:       []float64{1, math.NaN(), 3, math.NaN(), 5},
			want:    22,
		},
	} {
		x := NewVector(test.n, test.x, test.indices)
		y := mat64.NewVector(len(test.y), test.y)
		got := Dot(x, y)
		if got != test.want {
			t.Errorf("want = %v, got %v\n", test.want, got)
		}
	}
}
func (d *DatasourceDerive) CalculatePdpPrep(newValue string, interval float64) (float64, error) {
	if float64(d.Heartbeat) < interval {
		d.LastValue = Undefined
	}

	rate := math.NaN()
	newPdp := math.NaN()
	if newValue != Undefined && float64(d.Heartbeat) >= interval {
		newInt := new(big.Int)
		_, err := fmt.Sscan(newValue, newInt)
		if err != nil {
			return math.NaN(), errors.Errorf("not a simple signed integer: %s", newValue)
		}
		if d.LastValue != "U" {
			prevInt := new(big.Int)
			_, err := fmt.Sscan(d.LastValue, prevInt)
			if err != nil {
				return math.NaN(), errors.Wrap(err, 0)
			}
			diff := new(big.Int)
			diff.Sub(newInt, prevInt)

			newPdp = float64(diff.Uint64())
			rate = newPdp / interval
		}
	}

	if !d.checkRateBounds(rate) {
		newPdp = math.NaN()
	}

	d.LastValue = newValue

	return newPdp, nil
}
/* Min returns the smallest of a set of numbers or numeric strings. */
func Min(arg ...interface{}) (res float64, err error) {
	// Compare multiple arguments against each other.
	if len(arg) > 1 {
		res, err = GetFloat(arg[0])
		for _, v := range arg {
			v, _ := GetFloat(v)
			if v < res {
				res = v
			}
		}
	} else {
		// A single argument must be a slice; compare its elements.
		t := reflect.TypeOf(arg[0])
		v := reflect.ValueOf(arg[0])
		if t != arrayFloat {
			return math.NaN(), errors.New("Min: when a single argument is passed, it must be a []float64")
		} else {
			res = math.NaN()
			for i := 0; i < v.Len(); i++ {
				if math.IsNaN(res) {
					res = v.Index(i).Float()
				}
				if v.Index(i).Float() < res {
					res = v.Index(i).Float()
				}
			}
		}
	}
	return res, err
}
/// <summary>
/// The Upside Potential Ratio, compared to Sortino, is a further refinement:
/// it measures only the upside in the numerator and only the downside in the
/// denominator of the ratio.
/// (the numerator considers only the part of returns above MAR; the denominator
/// considers only the downside risk measured by DownsideDeviation)
/// </summary>
func UpsidePotentialRatio(Ra *utils.SlidingWindow, MAR float64) (float64, error) {
	//var r = Ra.Where<float64>(singleData => singleData > MAR).ToList<float64>();
	r, err := utils.AboveValue(Ra, MAR)
	if err != nil {
		return math.NaN(), err
	}
	var length int
	method := "subset"
	switch method {
	case "full":
		length = Ra.Count()
		break
	case "subset":
		length = r.Count()
		break
	default:
		return math.NaN(), errors.New("In UpsidePotentialRatio, method is default !!!")
	}
	add_Sliding, err := utils.Add(-MAR, r)
	if err != nil {
		return math.NaN(), err
	}
	dd2Data, err := DownsideDeviation2(Ra, MAR)
	if err != nil {
		return math.NaN(), err
	}
	var result = (add_Sliding.Sum() / float64(length)) / dd2Data
	return result, nil
}
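// Illustrative sketch only: the Upside Potential Ratio arithmetic computed
// directly on a plain []float64, without the utils.SlidingWindow machinery
// above. It uses the full sample length in both numerator and denominator
// (the "full" variant); the helper name upsidePotentialRatioPlain and its
// signature are assumptions for this example, not part of the original API.
func upsidePotentialRatioPlain(returns []float64, mar float64) float64 {
	n := float64(len(returns))
	if n == 0 {
		return math.NaN()
	}
	var upside, downsideSq float64
	for _, r := range returns {
		if r > mar {
			upside += r - mar // numerator: only gains above MAR
		} else {
			d := mar - r
			downsideSq += d * d // denominator: only shortfalls below MAR
		}
	}
	return (upside / n) / math.Sqrt(downsideSq/n)
}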
func TestTimeseries_MarshalJSON(t *testing.T) {
	for _, suite := range []struct {
		input    Timeseries
		expected string
	}{
		{
			Timeseries{
				TagSet: ParseTagSet("foo=bar"),
				Values: []float64{0, 1, -1, math.NaN()},
			},
			`{"tagset":{"foo":"bar"},"values":[0,1,-1,null]}`,
		},
		{
			Timeseries{
				TagSet: NewTagSet(),
				Values: []float64{0, 1, -1, math.NaN()},
			},
			`{"tagset":{},"values":[0,1,-1,null]}`,
		},
	} {
		a := assert.New(t).Contextf("expected=%s", suite.expected)
		encoded, err := json.Marshal(suite.input)
		a.CheckError(err)
		a.Eq(string(encoded), suite.expected)
	}
}
// Covariance is a measure of how much two sets of data change together.
func Covariance(data1, data2 Float64Data) (float64, error) {

	l1 := data1.Len()
	l2 := data2.Len()

	if l1 == 0 || l2 == 0 {
		return math.NaN(), EmptyInput
	}

	if l1 != l2 {
		return math.NaN(), SizeErr
	}

	m1, _ := Mean(data1)
	m2, _ := Mean(data2)

	// Running mean of the cross products of the deviations from the means
	var ss float64
	for i := 0; i < l1; i++ {
		delta1 := (data1.Get(i) - m1)
		delta2 := (data2.Get(i) - m2)
		ss += (delta1*delta2 - ss) / float64(i+1)
	}

	return ss * float64(l1) / float64(l1-1), nil
}
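// Sketch for comparison only: a straightforward two-pass sample covariance on
// plain slices. The incremental update in Covariance above keeps a running
// mean of the cross products, so multiplying by n/(n-1) at the end yields the
// same (n-1)-normalized result as this direct form. The helper name
// sampleCovariancePlain is an assumption for this example.
func sampleCovariancePlain(x, y []float64) float64 {
	n := len(x)
	if n < 2 || len(y) != n {
		return math.NaN()
	}
	var mx, my float64
	for i := 0; i < n; i++ {
		mx += x[i]
		my += y[i]
	}
	mx /= float64(n)
	my /= float64(n)
	var sum float64
	for i := 0; i < n; i++ {
		sum += (x[i] - mx) * (y[i] - my)
	}
	return sum / float64(n-1)
}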
// CovariancePopulation computes covariance for entire population between two variables.
func CovariancePopulation(data1, data2 Float64Data) (float64, error) {

	l1 := data1.Len()
	l2 := data2.Len()

	if l1 == 0 || l2 == 0 {
		return math.NaN(), EmptyInput
	}

	if l1 != l2 {
		return math.NaN(), SizeErr
	}

	m1, _ := Mean(data1)
	m2, _ := Mean(data2)

	var s float64
	for i := 0; i < l1; i++ {
		delta1 := (data1.Get(i) - m1)
		delta2 := (data2.Get(i) - m2)
		s += delta1 * delta2
	}

	return s / float64(l1), nil
}
// XY returns the cartesian coordinates of n. If n is not a node
// in the grid, (NaN, NaN) is returned.
func (g *Grid) XY(n graph.Node) (x, y float64) {
	if !g.Has(n) {
		return math.NaN(), math.NaN()
	}
	r, c := g.RowCol(n.ID())
	return float64(c), float64(r)
}
/** Returns the lowest positive root of the quadratic equation given by a*x*x + b*x + c = 0. If no solution exists,
 * NaN is returned.
 * @param a the first coefficient of the quadratic equation
 * @param b the second coefficient of the quadratic equation
 * @param c the third coefficient of the quadratic equation
 * @return the lowest positive root, or NaN */
func LowestPositiveRoot(a, b, c float32) float32 {
	det := b*b - 4*a*c
	if det < 0 {
		return float32(math.NaN())
	}

	sqrtD := float32(math.Sqrt(float64(det)))
	invA := 1 / (2 * a)
	r1 := (-b - sqrtD) * invA
	r2 := (-b + sqrtD) * invA

	if r1 > r2 {
		tmp := r2
		r2 = r1
		r1 = tmp
	}

	if r1 > 0 {
		return r1
	}
	if r2 > 0 {
		return r2
	}
	return float32(math.NaN())
}
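// Minimal usage sketch, assuming LowestPositiveRoot above is in scope and the
// fmt package is imported. For x*x - 3*x + 2 = 0 the roots are 1 and 2, so the
// lowest positive root is 1; for x*x + 1 = 0 the discriminant is negative and
// NaN is returned. The function name lowestPositiveRootExample is an
// assumption for this example.
func lowestPositiveRootExample() {
	fmt.Println(LowestPositiveRoot(1, -3, 2))                     // 1
	fmt.Println(math.IsNaN(float64(LowestPositiveRoot(1, 0, 1)))) // true
}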
// PrioritizeWorkUnits changes the priorities of some number of work
// units. The actual work units are in options["work_unit_keys"]. A
// higher priority results in the work units being scheduled sooner.
func (jobs *JobServer) PrioritizeWorkUnits(workSpecName string, options map[string]interface{}) (bool, string, error) {
	var (
		err      error
		query    coordinate.WorkUnitQuery
		workSpec coordinate.WorkSpec
	)
	pwuOptions := PrioritizeWorkUnitsOptions{
		Priority:   math.NaN(),
		Adjustment: math.NaN(),
	}
	workSpec, err = jobs.Namespace.WorkSpec(workSpecName)
	if err == nil {
		err = decode(&pwuOptions, options)
	}
	if err == nil && pwuOptions.WorkUnitKeys == nil {
		return false, "missing work_unit_keys", err
	}
	if err == nil {
		query.Names = pwuOptions.WorkUnitKeys
		if !math.IsNaN(pwuOptions.Priority) {
			err = workSpec.SetWorkUnitPriorities(query, pwuOptions.Priority)
		} else if !math.IsNaN(pwuOptions.Adjustment) {
			err = workSpec.AdjustWorkUnitPriorities(query, pwuOptions.Adjustment)
		}
	}
	return err == nil, "", err
}
func (b *Bisection) Init(f, g float64, step float64) EvaluationType {
	if step <= 0 {
		panic("bisection: bad step size")
	}
	if g >= 0 {
		panic("bisection: initial derivative is non-negative")
	}

	if b.GradConst == 0 {
		b.GradConst = 0.9
	}
	if b.GradConst <= 0 || b.GradConst >= 1 {
		panic("bisection: GradConst not between 0 and 1")
	}

	b.minStep = 0
	b.maxStep = math.Inf(1)
	b.currStep = step

	b.initF = f
	b.minF = f
	b.maxF = math.NaN()

	b.initGrad = g
	b.minGrad = g
	b.maxGrad = math.NaN()

	return FuncEvaluation | GradEvaluation
}
// PercentileNearestRank finds the relative standing in a slice of floats using the Nearest Rank method
func PercentileNearestRank(input Float64Data, percent float64) (percentile float64, err error) {

	// Find the length of items in the slice
	il := input.Len()

	// Return an error for empty slices
	if il == 0 {
		return math.NaN(), EmptyInput
	}

	// Return error for less than 0 or greater than 100 percentages
	if percent < 0 || percent > 100 {
		return math.NaN(), BoundsErr
	}

	// Start by sorting a copy of the slice
	c := sortedCopy(input)

	// Return the last item
	if percent == 100.0 {
		return c[il-1], nil
	}

	// Find ordinal ranking
	or := int(math.Ceil(float64(il) * percent / 100))

	// Return the item that is in the place of the ordinal rank
	if or == 0 {
		return c[0], nil
	}
	return c[or-1], nil
}
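// Minimal usage sketch, assuming PercentileNearestRank above is in scope, the
// fmt package is imported, and Float64Data is backed by a []float64 as its
// Len/Get usage above suggests. With five sorted values, the ordinal rank for
// the 30th percentile is ceil(5*0.30) = 2, so the second value is returned.
// The function name percentileNearestRankExample is an assumption for this
// example.
func percentileNearestRankExample() {
	data := Float64Data{15, 20, 35, 40, 50}
	p, _ := PercentileNearestRank(data, 30)
	fmt.Println(p) // 20
	p, _ = PercentileNearestRank(data, 100)
	fmt.Println(p) // 50
}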
/// <summary>
/// Kappa is a generalized downside risk-adjusted performance measure.
/// To calculate it, we take the difference between the mean of the distribution
/// and the target, and divide it by the l-th root of the l-th lower partial
/// moment. To calculate the l-th lower partial moment we take the subset of
/// returns below the target, sum the differences between the target and these
/// returns raised to the power l, and divide this sum by the length of the
/// whole distribution.
/// (the non-annualized mean excess return over MAR divided by the l-th root of
/// the l-th lower partial moment of the returns below MAR)
/// </summary>
func Kappa(Ra *utils.SlidingWindow, MAR float64, l float64) (float64, error) {
	undervalues, err := utils.NewSlidingWindow(Ra.Count())
	if err != nil {
		return math.NaN(), err
	}
	for i := 0; i < Ra.Count(); i++ {
		if Ra.Data()[i] < MAR {
			undervalues.Add(Ra.Data()[i])
		}
	}
	var n = float64(Ra.Count())
	var m = float64(Ra.Average())

	neg_Sliding, err := utils.Negative(undervalues)
	if err != nil {
		return math.NaN(), err
	}
	add_Sliding, err := utils.Add(MAR, neg_Sliding)
	if err != nil {
		return math.NaN(), err
	}
	pow_Sliding, err := utils.Power(add_Sliding, float64(l))
	if err != nil {
		return math.NaN(), err
	}
	var temp = pow_Sliding.Sum() / n
	return (m - MAR) / math.Pow(temp, (1.0/float64(l))), nil
}
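// Illustrative sketch only: the Kappa formula computed directly on a plain
// []float64, mirroring the steps above. The l-th lower partial moment is the
// mean over the full sample of (MAR - r)^l for returns below MAR, and Kappa is
// (mean - MAR) / LPM^(1/l). The helper name kappaPlain and its plain-slice
// signature are assumptions for this example.
func kappaPlain(returns []float64, mar, l float64) float64 {
	n := float64(len(returns))
	if n == 0 {
		return math.NaN()
	}
	var sum, lpm float64
	for _, r := range returns {
		sum += r
		if r < mar {
			lpm += math.Pow(mar-r, l)
		}
	}
	mean := sum / n
	lpm /= n
	return (mean - mar) / math.Pow(lpm, 1.0/l)
}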
// Convert database.sql types to float64s for Prometheus consumption. Null types are mapped to NaN. string and []byte
// types are parsed as float64s where possible; unparseable values are mapped to NaN and !ok.
func dbToFloat64(t interface{}) (float64, bool) {
	switch v := t.(type) {
	case int64:
		return float64(v), true
	case float64:
		return v, true
	case time.Time:
		return float64(v.Unix()), true
	case []byte:
		// Try and convert to string and then parse to a float64
		strV := string(v)
		result, err := strconv.ParseFloat(strV, 64)
		if err != nil {
			return math.NaN(), false
		}
		return result, true
	case string:
		result, err := strconv.ParseFloat(v, 64)
		if err != nil {
			log.Infoln("Could not parse string:", err)
			return math.NaN(), false
		}
		return result, true
	case nil:
		return math.NaN(), true
	default:
		return math.NaN(), false
	}
}
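// Minimal usage sketch, assuming dbToFloat64 above is in scope and the fmt
// package is imported; the sample inputs and the example function name
// dbToFloat64Example are assumptions for this illustration.
func dbToFloat64Example() {
	fmt.Println(dbToFloat64(int64(42)))      // 42 true
	fmt.Println(dbToFloat64([]byte("1.5")))  // 1.5 true
	fmt.Println(dbToFloat64("not a number")) // NaN false
	fmt.Println(dbToFloat64(nil))            // NaN true
}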
/// <summary>
/// To calculate the Burke ratio we take the difference between the portfolio
/// return and the risk-free rate and divide it by the square root of the
/// sum of the squares of the drawdowns. To calculate the modified Burke ratio
/// we just multiply the Burke ratio by the square root of the number of data points.
/// (a risk-adjusted return measure where the adjustment is made via the sum of
/// squared drawdowns)
/// </summary>
func BurkeRatio(Ra *utils.SlidingWindow, Rf float64, scale float64) (float64, error) {
	var len = Ra.Count()
	var in_drawdown = false
	var peak = 1
	var temp = 0.0
	drawdown, err := utils.NewSlidingWindow(len)
	if err != nil {
		return math.NaN(), err
	}
	for i := 1; i < len; i++ {
		if Ra.Data()[i] < 0 {
			if !in_drawdown {
				peak = i - 1
				in_drawdown = true
			}
		} else {
			if in_drawdown {
				temp = 1.0
				for j := peak + 1; j < i; j++ {
					temp = temp * (1.0 + Ra.Data()[j])
				}
				drawdown.Add(temp - 1.0) //Source
				in_drawdown = false
			}
		}
	}
	if in_drawdown {
		temp = 1.0
		for j := peak + 1; j < len; j++ {
			temp = temp * (1.0 + Ra.Data()[j])
		}
		drawdown.Add(temp - 1.0) //Source
		//drawdown.Add((temp - 1.0) * 100.0)
		in_drawdown = false
	}
	//var Rp = Annualized(Ra, scale, true) - 1.0--->Source
	Rp, err := Annualized(Ra, scale, true)
	if err != nil {
		return math.NaN(), err
	}
	var result float64
	if drawdown.Count() != 0 {
		pow_Sliding, err := utils.Power(drawdown, 2)
		if err != nil {
			return math.NaN(), err
		}
		Rf = Rf * scale
		result = (Rp - Rf) / math.Sqrt(pow_Sliding.Sum())
	} else {
		result = 0
	}
	modified := true
	if modified {
		result = result * math.Sqrt(float64(len))
	}
	return result, nil
}
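// Illustrative sketch only: the final Burke ratio arithmetic given an already
// extracted list of drawdowns, mirroring the tail of BurkeRatio above. Here rp
// and rf are assumed to be the annualized portfolio and risk-free returns, and
// nObs is the number of return observations; the helper name
// burkeRatioFromDrawdowns and its signature are assumptions for this example.
func burkeRatioFromDrawdowns(rp, rf float64, drawdowns []float64, nObs int, modified bool) float64 {
	if len(drawdowns) == 0 {
		return 0
	}
	var ss float64
	for _, d := range drawdowns {
		ss += d * d // sum of squared drawdowns
	}
	result := (rp - rf) / math.Sqrt(ss)
	if modified {
		result *= math.Sqrt(float64(nObs)) // modified Burke ratio scales by sqrt(n)
	}
	return result
}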
func (d *DatasourceAbstract) ProcessPdp(pdpValue float64, elapsed ElapsedPdpSteps, step time.Duration) float64 {
	var preUnknown float64
	if math.IsNaN(pdpValue) {
		preUnknown = elapsed.PreInt
	} else {
		if math.IsNaN(d.PdpValue) {
			d.PdpValue = 0
		}
		d.PdpValue += pdpValue / elapsed.Interval * elapsed.PreInt
	}

	var pdpTemp float64
	if elapsed.Interval > float64(d.Heartbeat) || uint64(step/time.Second/2) < d.UnknownSecCount {
		pdpTemp = math.NaN()
	} else {
		diffPdpSteps := (elapsed.Steps * uint64(step)) / uint64(time.Second)
		pdpTemp = d.PdpValue / (float64(diffPdpSteps-d.UnknownSecCount) - preUnknown)
	}

	if math.IsNaN(pdpValue) {
		d.UnknownSecCount = uint64(elapsed.PostInt)
		d.PdpValue = math.NaN()
	} else {
		d.UnknownSecCount = 0
		d.PdpValue = pdpValue / elapsed.Interval * elapsed.PostInt
	}

	return pdpTemp
}
/// <summary>
/// calculate a traditional or modified Sharpe Ratio of Return over StdDev or
/// VaR or ES
///
/// The Sharpe ratio is simply the return per unit of risk (represented by
/// variability). In the classic case, the unit of risk is the standard
/// deviation of the returns.
/// </summary>
func SharpeRatio(Ra *utils.SlidingWindow, Rf_val float64, scale float64) (float64, error) {
	Rf, err := utils.CreateList(Rf_val, Ra.Count())
	if err != nil {
		return math.NaN(), err
	}
	xR, err := Excess(Ra, Rf)
	if err != nil {
		return math.NaN(), err
	}
	numerator := 0.0
	denominator := 0.0
	annualize := 1
	if annualize == 1 {
		denominator, err = StdDev_Annualized(Ra, scale)
		if err != nil {
			return math.NaN(), err
		}
		numerator, err = Annualized(xR, scale, true)
		if err != nil {
			return math.NaN(), err
		}
	} else {
		denominator, err = StdDev(Ra)
		if err != nil {
			return math.NaN(), err
		}
		numerator = xR.Average()
	}
	return numerator / denominator, nil
}
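// Illustrative sketch only: a non-annualized Sharpe ratio on a plain
// []float64, i.e. the mean excess return over the sample standard deviation,
// corresponding to the non-annualized branch above. The helper name
// sharpeRatioPlain, its plain-slice signature, and the n-1 denominator in the
// standard deviation are assumptions for this example.
func sharpeRatioPlain(returns []float64, rf float64) float64 {
	n := float64(len(returns))
	if n < 2 {
		return math.NaN()
	}
	var mean float64
	for _, r := range returns {
		mean += r - rf
	}
	mean /= n
	var ss float64
	for _, r := range returns {
		d := (r - rf) - mean
		ss += d * d
	}
	return mean / math.Sqrt(ss/(n-1))
}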
func (this *P1S1F1SWrapper) Process(AssetPriceReturns, AssetPriceBenchMark []float64, date []time.Time) (float64, error) {
	if AssetPriceReturns == nil {
		return math.NaN(), errors.New("The Input RA is Error !!!")
	}
	var err error
	Period := getPeriod(date)
	if Period == 2520 {
		AssetPriceReturns, err = reorganizeInputPrice(date, AssetPriceReturns)
		if err != nil {
			return math.NaN(), errors.New("Reorganize Minutes Price Error !!!")
		}
		Period = 252
	}
	if this.param == 252 {
		this.param = Period
	}
	Price, err := utils.NewSlidingWindow(len(AssetPriceReturns))
	if err != nil {
		return math.NaN(), err
	}
	for _, val := range AssetPriceReturns {
		Price.Add(val)
	}
	Ra, err := Calculate(Price, "discrete")
	if err != nil {
		return math.NaN(), err
	}
	if this.param == 0.03 {
		this.param = this.param / Period
	}
	return this.function(Ra, this.param, this.str)
}
func (s *StatsTimer) Percentile(percentile float64) (float64, error) {
	// Nearest rank implementation
	// http://en.wikipedia.org/wiki/Percentile
	histLen := len(s.history)

	if percentile > 100 {
		return math.NaN(), errors.New("Invalid argument")
	}

	in := make([]int64, 0, histLen)
	for i := range s.history {
		if s.history[i] != NOT_INITIALIZED {
			in = append(in, s.history[i])
		}
	}

	filtLen := len(in)

	if filtLen < 1 {
		return math.NaN(), errors.New("No values")
	}

	// Since slices are zero-indexed, truncating here effectively rounds up to
	// the nearest rank
	nearest_rank := int((percentile / 100) * float64(filtLen))

	if nearest_rank == filtLen {
		nearest_rank = filtLen - 1
	}

	sort.Sort(Int64Slice(in))

	ret := float64(in[nearest_rank]) / float64(s.timeUnit.Nanoseconds())

	return ret, nil
}
func Transform2(srcpj, dstpj *Proj, x, y float64) (float64, float64, error) {
	xx, yy, _, err := transform(srcpj, dstpj, []float64{x}, []float64{y}, nil)
	if err != nil {
		return math.NaN(), math.NaN(), err
	}
	return xx[0], yy[0], err
}
func TestVfltu8(t *testing.T) {
	input := []byte{4, 127, 250, 190}
	output := make([]float32, 8)

	for i := 0; i < len(output); i++ {
		output[i] = float32(math.NaN())
	}
	Vfltu8(input, 1, output, 1)
	for i, x := range input {
		expected := float32(x)
		if expected != output[i] {
			t.Errorf("Vfltu8 strides == 1 : output %f != expected %f for index %d", output[i], expected, i)
		}
	}

	for i := 0; i < len(output); i++ {
		output[i] = float32(math.NaN())
	}
	Vfltu8(input, 2, output, 1)
	for i := 0; i < len(input); i += 2 {
		expected := float32(input[i])
		if expected != output[i/2] {
			t.Errorf("Vfltu8 in stride = 2 out stride = 1 : output %f != expected %f for index %d", output[i/2], expected, i)
		}
	}
	for i := len(input) / 2; i < len(output); i++ {
		if !math.IsNaN(float64(output[i])) {
			t.Errorf("Vfltu8 wrote too far for input stride 2 (%d=%f)", i, output[i])
		}
	}
}
func Transform3(srcpj, dstpj *Proj, x, y, z float64) (float64, float64, float64, error) {
	xx, yy, zz, err := transform(srcpj, dstpj, []float64{x}, []float64{y}, []float64{z})
	if err != nil {
		return math.NaN(), math.NaN(), math.NaN(), err
	}
	return xx[0], yy[0], zz[0], err
}
func TestInDelta(t *testing.T) {
	mockT := new(testing.T)

	True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01")
	True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01")
	True(t, InDelta(mockT, 1, 2, 1), "|1 - 2| <= 1")
	False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail")
	False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail")
	False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail")
	False(t, InDelta(mockT, 42, math.NaN(), 0.01), "Expected NaN for actual to fail")
	False(t, InDelta(mockT, math.NaN(), 42, 0.01), "Expected NaN for expected to fail")

	cases := []struct {
		a, b  interface{}
		delta float64
	}{
		{uint8(2), uint8(1), 1},
		{uint16(2), uint16(1), 1},
		{uint32(2), uint32(1), 1},
		{uint64(2), uint64(1), 1},

		{int(2), int(1), 1},
		{int8(2), int8(1), 1},
		{int16(2), int16(1), 1},
		{int32(2), int32(1), 1},
		{int64(2), int64(1), 1},

		{float32(2), float32(1), 1},
		{float64(2), float64(1), 1},
	}

	for _, tc := range cases {
		True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta)
	}
}
/// <summary>
/// M squared excess is the quantity above the standard M.
/// There is a geometric excess return, which is better for Bacon, and an arithmetic excess return.
/// (the excess is measured against the annualized return of Rb)
/// </summary>
func MSquaredExcess(Ra *utils.SlidingWindow, Rb *utils.SlidingWindow, scale float64, Rf float64, method string) (float64, error) {
	//var n = Rb.Count() // Ra and Rb have the same length
	Rbp, err := Annualized(Rb, scale, true)
	if err != nil {
		return math.NaN(), err
	}
	var result float64
	switch method {
	case "geometric":
		msq_data, err := MSquared(Ra, Rb, scale, Rf)
		if err != nil {
			return math.NaN(), err
		}
		result = (1.0+msq_data)/(1.0+Rbp) - 1.0
		break
	case "arithmetic":
		msq_data, err := MSquared(Ra, Rb, scale, Rf)
		if err != nil {
			return math.NaN(), err
		}
		result = msq_data - Rbp
		break
	default:
		return math.NaN(), errors.New("In MSquaredExcess, method default !!!")
	}
	return result, nil
}
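// Illustrative sketch only: with an M-squared value msq and an annualized
// benchmark return rbp, the two excess conventions above reduce to this
// arithmetic. The function name mSquaredExcessPlain and its float64 arguments
// are assumptions for this example.
func mSquaredExcessPlain(msq, rbp float64, geometric bool) float64 {
	if geometric {
		return (1.0+msq)/(1.0+rbp) - 1.0 // geometric excess
	}
	return msq - rbp // arithmetic excess
}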
func (d *DatasourceDDerive) CalculatePdpPrep(newValue string, interval float64) (float64, error) {
	if float64(d.Heartbeat) < interval {
		d.LastValue = Undefined
	}

	rate := math.NaN()
	newPdp := math.NaN()
	if newValue != Undefined && float64(d.Heartbeat) >= interval {
		newval, err := strconv.ParseFloat(newValue, 64)
		if err != nil {
			return math.NaN(), errors.Wrap(err, 0)
		}
		oldval, err := strconv.ParseFloat(d.LastValue, 64)
		if err != nil {
			return math.NaN(), errors.Wrap(err, 0)
		}
		newPdp = newval - oldval
		rate = newPdp / interval
	}

	if !d.checkRateBounds(rate) {
		newPdp = math.NaN()
	}

	d.LastValue = newValue

	return newPdp, nil
}
// XY returns the cartesian coordinates of n. If n is not a node
// in the grid, (NaN, NaN) is returned.
func (l *LimitedVisionGrid) XY(n graph.Node) (x, y float64) {
	if !l.Has(n) {
		return math.NaN(), math.NaN()
	}
	r, c := l.RowCol(n.ID())
	return float64(c), float64(r)
}
func stringToFloat(value string) float64 {
	value = strings.TrimSpace(value)

	if value == "" {
		return 0
	}

	parseFloat := false
	if strings.IndexRune(value, '.') != -1 {
		parseFloat = true
	} else if stringToNumberParseInteger.MatchString(value) {
		parseFloat = false
	} else {
		parseFloat = true
	}

	if parseFloat {
		number, err := strconv.ParseFloat(value, 64)
		if err != nil && err.(*strconv.NumError).Err != strconv.ErrRange {
			return math.NaN()
		}
		return number
	}

	number, err := strconv.ParseInt(value, 0, 64)
	if err != nil {
		return math.NaN()
	}
	return float64(number)
}
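// Minimal usage sketch, assuming stringToFloat above is in scope and the fmt
// package is imported; the sample inputs and the example function name
// stringToFloatExample are assumptions for this illustration.
func stringToFloatExample() {
	fmt.Println(stringToFloat(" 3.5 "))           // 3.5 (whitespace is trimmed)
	fmt.Println(stringToFloat(""))                // 0 (empty string maps to zero)
	fmt.Println(math.IsNaN(stringToFloat("abc"))) // true (unparseable input maps to NaN)
}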
// redundancy returns the redundancy of the least redundant chunk. A file
// becomes available when this redundancy is >= 1. Assumes that every piece is
// unique within a file contract. NaN is returned if the file has size 0.
func (f *file) redundancy() float64 {
	if f.size == 0 {
		return math.NaN()
	}
	piecesPerChunk := make([]int, f.numChunks())
	// If the file has non-0 size then the number of chunks should also be
	// non-0. Therefore the f.size == 0 conditional block above must appear
	// before this check.
	if len(piecesPerChunk) == 0 {
		build.Critical("cannot get redundancy of a file with 0 chunks")
		return math.NaN()
	}
	for _, fc := range f.contracts {
		for _, p := range fc.Pieces {
			piecesPerChunk[p.Chunk]++
		}
	}
	minPieces := piecesPerChunk[0]
	for _, numPieces := range piecesPerChunk {
		if numPieces < minPieces {
			minPieces = numPieces
		}
	}
	return float64(minPieces) / float64(f.erasureCode.MinPieces())
}
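// Illustrative sketch only: the core of the redundancy calculation above, given
// a pieces-per-chunk count and the minimum number of pieces needed to recover a
// chunk. A file is retrievable once the least-covered chunk still has at least
// minPieces pieces, i.e. once this ratio reaches 1. The helper name
// redundancyFromCounts and its signature are assumptions for this example.
func redundancyFromCounts(piecesPerChunk []int, minPieces int) float64 {
	if len(piecesPerChunk) == 0 || minPieces == 0 {
		return math.NaN()
	}
	min := piecesPerChunk[0]
	for _, n := range piecesPerChunk {
		if n < min {
			min = n
		}
	}
	return float64(min) / float64(minPieces)
}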