func TestWaldTest(t *testing.T) {
	tests := []struct {
		wald   Wald
		sample Sample
		result Result
	}{
		{
			wald:   Wald{Size: 0.05, NullValue: 0},
			sample: sample{mle: 1, variance: 1},
			result: Result{
				Statistic:          1.0,
				Power:              1 - stdNormal.Cdf(-1+1.95996) + stdNormal.Cdf(-1-1.95996),
				ConfidenceInterval: []float64{-0.95996, 2.95996},
				ConfidenceLevel:    .95,
				PValue:             2 * stdNormal.Cdf(-1.0),
				RejectNull:         false,
				Size:               0.05,
				NullValue:          0,
			},
		},
	}

	for _, tt := range tests {
		actual := tt.wald.Test(tt.sample)

		assert.Equal(t, tt.result.Statistic, actual.Statistic)
		assert.Equal(t, tt.result.Power, actual.Power)

		// Confidence interval
		assert.InEpsilon(t, tt.result.ConfidenceInterval[0], actual.ConfidenceInterval[0], 0.0001)
		assert.InEpsilon(t, tt.result.ConfidenceInterval[1], actual.ConfidenceInterval[1], 0.0001)

		assert.Equal(t, tt.result.PValue, actual.PValue)
		assert.Equal(t, tt.result.RejectNull, actual.RejectNull)
	}
}
func TestStddevBesselWeighted(t *testing.T) {
	result, err := mino.FromPoints(weightedFib).Transform(transform.Stddev{Bessel: true})
	r := result.(transform.StddevResults)
	assert.Nil(t, err)
	assert.InEpsilon(t, 6.84670, r.Deviation, epsilon)
	assert.InEpsilon(t, 8.48138, r.Average, epsilon)
}
func TestStddevNormalOne(t *testing.T) {
	result, err := mino.FromList(one).Transform(transform.Stddev{})
	r := result.(transform.StddevResults)
	assert.Equal(t, transform.InsufficientDataError, err)
	assert.InEpsilon(t, 0, r.Deviation, epsilon)
	assert.InEpsilon(t, 1, r.Average, epsilon)
}
func TestStddevBessel(t *testing.T) {
	result, err := mino.FromList(fibonacci).Transform(transform.Stddev{Bessel: true})
	assert.Nil(t, err)
	r := result.(transform.StddevResults)
	assert.InEpsilon(t, 7.0660, r.Deviation, epsilon)
	assert.InEpsilon(t, 6.7500, r.Average, epsilon)
}
func Test_DeterministicTrigger_Unmarshal(t *testing.T) {
	var trigger triggers.DeterministicTrigger
	var err error

	// unmarshal with type and parameter
	err = json.Unmarshal([]byte(`{"type":"deterministic","parameter":0.123}`), &trigger)
	assert.Nil(t, err)
	assert.InEpsilon(t, trigger.GetParameter(), 0.123, 0.001)

	// unmarshal with type, parameter and comment
	err = json.Unmarshal([]byte(`{"type":"deterministic","parameter":0.456,"comment":"ok"}`), &trigger)
	assert.Nil(t, err)
	assert.InEpsilon(t, trigger.GetParameter(), 0.456, 0.001)
	assert.Equal(t, trigger.GetComment(), "ok")
}
func TestReservation(t *testing.T) {
	pod := buildPod("p1", 100, 200000)
	pod2 := &kube_api.Pod{
		Spec: kube_api.PodSpec{
			Containers: []kube_api.Container{
				{
					Resources: kube_api.ResourceRequirements{
						Requests: kube_api.ResourceList{},
					},
				},
			},
		},
	}

	nodeInfo := schedulercache.NewNodeInfo(pod, pod, pod2)
	node := &kube_api.Node{
		Status: kube_api.NodeStatus{
			Capacity: kube_api.ResourceList{
				kube_api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
			},
		},
	}

	reservation, err := calculateReservation(node, nodeInfo, kube_api.ResourceCPU)
	assert.NoError(t, err)
	assert.InEpsilon(t, 1.0/10, reservation, 0.01)

	_, err = calculateReservation(node, nodeInfo, kube_api.ResourceMemory)
	assert.Error(t, err)
}
func TestSum(t *testing.T) {
	s := Sum(DiceMust("2"), DiceMust("d5"))
	min, max := s.Bound()
	assert.Equal(t, 3, min)
	assert.Equal(t, 7, max)
	assert.InEpsilon(t, 5, convergentAvg(s), near)
}
func TestFloat32(t *testing.T) {
	p := parserMixin{}
	v := p.Float32()
	err := p.value.Set("123.45")
	assert.NoError(t, err)
	assert.InEpsilon(t, 123.45, *v, 0.001)
}
func TestLearnOnSegments(t *testing.T) {
	tmp := NewTemporalMemoryParams()
	tm := NewTemporalMemory(tmp)
	connections := tm.Connections

	connections.CreateSegment(0)
	connections.CreateSynapse(0, 23, 0.6)
	connections.CreateSynapse(0, 37, 0.4)
	connections.CreateSynapse(0, 477, 0.9)

	connections.CreateSegment(1)
	connections.CreateSynapse(1, 733, 0.7)

	connections.CreateSegment(8)
	connections.CreateSynapse(2, 486, 0.9)

	connections.CreateSegment(100)

	prevActiveSegments := []int{0, 2}
	learningSegments := []int{1, 3}
	prevActiveSynapsesForSegment := map[int][]int{
		0: []int{0, 1},
		1: []int{3},
	}
	winnerCells := []int{0}
	prevWinnerCells := []int{10, 11, 12, 13, 14}

	tm.learnOnSegments(prevActiveSegments, learningSegments, prevActiveSynapsesForSegment,
		winnerCells, prevWinnerCells, connections)

	// Check segment 0
	assert.Equal(t, 0.7, connections.DataForSynapse(0).Permanence)
	assert.Equal(t, 0.5, connections.DataForSynapse(1).Permanence)
	assert.Equal(t, 0.8, connections.DataForSynapse(2).Permanence)

	// Check segment 1
	assert.InEpsilon(t, 0.8, connections.DataForSynapse(3).Permanence, 0.1)
	assert.Equal(t, 2, len(connections.synapsesForSegment[1]))

	// Check segment 2
	assert.Equal(t, 0.9, connections.DataForSynapse(4).Permanence)
	assert.Equal(t, 1, len(connections.synapsesForSegment[2]))

	// Check segment 3
	assert.Equal(t, 1, len(connections.synapsesForSegment[3]))
}
func testCollapseTimeSamples(t *testing.T) {
	now := time.Now()
	metrics := heapster.MetricResult{
		Metrics: []heapster.MetricPoint{
			{Timestamp: now, Value: 50, FloatValue: nil},
			{Timestamp: now.Add(-15 * time.Second), Value: 100, FloatValue: nil},
			{Timestamp: now.Add(-60 * time.Second), Value: 100000, FloatValue: nil},
		},
		LatestTimestamp: now,
	}

	val, timestamp, hadMetrics := collapseTimeSamples(metrics, time.Minute)
	assert.True(t, hadMetrics, "should report that it received a populated list of metrics")
	assert.InEpsilon(t, float64(75), val, 0.1, "collapsed sample value should be as expected")
	assert.True(t, timestamp.Equal(now), "timestamp should be the current time (the newest)")
}
func TestExponentialBackoff(t *testing.T) {
	clock := &MockClock{}
	rp := NewDefaultRetryPolicy(clock)
	rp.MaxAttempts = 9999999
	rp.MinDelay = time.Second
	rp.MaxDelay = time.Minute
	rp.TimeLimit = time.Hour
	rp.RandomizeDelays = false

	op := rp.StartOperation()

	assert.True(t, op.ShouldRetry("Attempt 1"))
	assert.Equal(t, time.Duration(0), clock.LastSleepDuration) // first retry is immediate

	assert.True(t, op.ShouldRetry("Attempt 2"))
	assert.Equal(t, time.Second, clock.LastSleepDuration) // MinDelay

	assert.True(t, op.ShouldRetry("Attempt 3"))
	assert.InEpsilon(t, 1.62, clock.LastSleepDuration.Seconds(), 0.01)

	assert.True(t, op.ShouldRetry("Attempt 4"))
	assert.InEpsilon(t, 2.62, clock.LastSleepDuration.Seconds(), 0.01)

	assert.True(t, op.ShouldRetry("Attempt 5"))
	assert.InEpsilon(t, 4.24, clock.LastSleepDuration.Seconds(), 0.01)

	for i := 0; i < 7; i++ {
		assert.True(t, op.ShouldRetry("Attempt X"))
	}
	assert.Equal(t, time.Minute, clock.LastSleepDuration) // MaxDelay
}
func TestDiceBounds(t *testing.T) {
	for _, tc := range []struct {
		string
		min, max int
		avg      float64
	}{
		{"4d8", 4, 8 * 4, (4 + 8*4) / 2},
		{"d4", 1, 4, float64(1+4) / 2.0},
	} {
		r := DiceMust(tc.string)
		min, max := r.Bound()
		assert.Equal(t, tc.min, min)
		assert.Equal(t, tc.max, max)
		assert.InEpsilon(t, tc.avg, convergentAvg(r), near)
	}
}
func TestUtilization(t *testing.T) {
	pod := BuildTestPod("p1", 100, 200000)
	pod2 := BuildTestPod("p2", -1, -1)

	nodeInfo := schedulercache.NewNodeInfo(pod, pod, pod2)
	node := BuildTestNode("node1", 2000, 2000000)

	utilization, err := CalculateUtilization(node, nodeInfo)
	assert.NoError(t, err)
	assert.InEpsilon(t, 2.0/10, utilization, 0.01)

	node2 := BuildTestNode("node1", 2000, -1)
	_, err = CalculateUtilization(node2, nodeInfo)
	assert.Error(t, err)
}
func TestAggregateSumSingle(t *testing.T) {
	now := time.Now()
	result := heapster.MetricResultList{
		Items: []heapster.MetricResult{
			{
				Metrics: []heapster.MetricPoint{
					{now, 50, nil},
					{now.Add(-65 * time.Second), 100000, nil},
				},
				LatestTimestamp: now,
			},
		},
	}

	sum, cnt, _ := calculateSumFromTimeSample(result, time.Minute)
	assert.Equal(t, int64(50), sum.intValue)
	assert.InEpsilon(t, 50.0, sum.floatValue, 0.1)
	assert.Equal(t, 1, cnt)
}
func TestShuffler(t *testing.T) {
	rand.Seed(time.Now().Unix())
	fixture := Whisper{}

	in := make(chan *points.Points)
	out1 := make(chan *points.Points)
	out2 := make(chan *points.Points)
	out3 := make(chan *points.Points)
	out4 := make(chan *points.Points)
	out := [](chan *points.Points){out1, out2, out3, out4}

	go fixture.shuffler(in, out, nil)

	buckets := [4]int{0, 0, 0, 0}
	runlength := 10000

	// Count how many points land in each output channel.
	var wg sync.WaitGroup
	wg.Add(4)
	for index := range out {
		outChan := out[index]
		i := index
		go func() {
			for {
				_, ok := <-outChan
				if !ok {
					break
				}
				buckets[i]++
			}
			wg.Done()
		}()
	}

	randomPoints(runlength, in)
	close(in)
	wg.Wait()

	total := 0
	for b := range buckets {
		assert.InEpsilon(t, float64(runlength)/4, buckets[b], (float64(runlength)/4)*.005,
			fmt.Sprintf("shuffle distribution is greater than .5%% across 4 buckets after %d inputs", runlength))
		total += buckets[b]
	}
	assert.Equal(t, runlength, total, "total output of shuffle is not equal to input")
}
func TestAggregateSum(t *testing.T) {
	// calculateSumFromTimeSample(metrics heapster.MetricResultList, duration time.Duration) (sum intAndFloat, count int, timestamp time.Time)
	now := time.Now()
	result := heapster.MetricResultList{
		Items: []heapster.MetricResult{
			{
				Metrics: []heapster.MetricPoint{
					{now, 50, nil},
					{now.Add(-15 * time.Second), 100, nil},
					{now.Add(-60 * time.Second), 100000, nil},
				},
				LatestTimestamp: now,
			},
		},
	}

	sum, cnt, _ := calculateSumFromTimeSample(result, time.Minute)
	assert.Equal(t, int64(75), sum.intValue)
	assert.InEpsilon(t, 75.0, sum.floatValue, 0.1)
	assert.Equal(t, 1, cnt)
}
func TestShuffler(t *testing.T) {
	rand.Seed(time.Now().Unix())
	fixture := Whisper{exit: make(chan bool)}

	in := make(chan *points.Points)
	out1 := make(chan *points.Points)
	out2 := make(chan *points.Points)
	out3 := make(chan *points.Points)
	out4 := make(chan *points.Points)
	out := [](chan *points.Points){out1, out2, out3, out4}

	go fixture.shuffler(in, out)

	buckets := [4]int{0, 0, 0, 0}
	dotest := make(chan bool)
	runlength := 10000

	go func() {
		for {
			select {
			case <-out1:
				buckets[0]++
			case <-out2:
				buckets[1]++
			case <-out3:
				buckets[2]++
			case <-out4:
				buckets[3]++
			case <-dotest:
				total := 0
				for b := range buckets {
					assert.InEpsilon(t, float64(runlength)/4, buckets[b], (float64(runlength)/4)*.005,
						fmt.Sprintf("shuffle distribution is greater than .5%% across 4 buckets after %d inputs", runlength))
					total += buckets[b]
				}
				assert.Equal(t, runlength, total, "total output of shuffle is not equal to input")
			}
		}
	}()

	randomPoints(runlength, in)
	fixture.exit <- true
	dotest <- true
}
func TestModelMetricPassing(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping heapster model integration test.")
	}
	assert := assert.New(t)
	resolution := 2 * time.Second

	sources := []source_api.Source{newTestSource()}
	cache := cache.NewCache(time.Hour, time.Hour)
	assert.NotNil(cache)
	sinkManager, err := sinks.NewExternalSinkManager([]sink_api.ExternalSink{}, cache, resolution)
	assert.NoError(err)

	manager, err := manager.NewManager(sources, sinkManager, resolution, time.Hour, cache, true, resolution, resolution)
	assert.NoError(err)
	start := time.Now()
	manager.Start()
	defer manager.Stop()
	time.Sleep(10 * time.Second)

	model := manager.GetModel()
	pods := model.GetPods(testNamespace)
	assert.Equal(podCount, len(pods))

	metrics, _, err := model.GetPodMetric(model_api.PodMetricRequest{
		NamespaceName: testNamespace,
		PodName:       "pod-0",
		MetricRequest: model_api.MetricRequest{
			Start:      start,
			End:        time.Time{},
			MetricName: "cpu-usage",
		},
	})
	assert.NoError(err)
	//TODO: Expect more than 1 metric once #551 is fixed
	assert.NotEmpty(metrics)
	assert.InEpsilon(loadAverageMilli, metrics[0].Value, 50)
}
func TestRateCalculator(t *testing.T) {
	key := core.PodContainerKey("ns1", "pod1", "c")
	now := time.Now()

	prev := &core.DataBatch{
		Timestamp: now.Add(-time.Minute),
		MetricSets: map[string]*core.MetricSet{
			key: {
				CreateTime: now.Add(-time.Hour),
				ScrapeTime: now.Add(-60 * time.Second),
				Labels: map[string]string{
					core.LabelMetricSetType.Key: core.MetricSetTypePodContainer,
				},
				MetricValues: map[string]core.MetricValue{
					core.MetricCpuUsage.MetricDescriptor.Name: {
						ValueType:  core.ValueInt64,
						MetricType: core.MetricCumulative,
						IntValue:   947130377781,
					},
					core.MetricNetworkTxErrors.MetricDescriptor.Name: {
						ValueType:  core.ValueInt64,
						MetricType: core.MetricCumulative,
						IntValue:   0,
					},
				},
			},
		},
	}

	current := &core.DataBatch{
		Timestamp: now,
		MetricSets: map[string]*core.MetricSet{
			key: {
				CreateTime: now.Add(-time.Hour),
				ScrapeTime: now,
				Labels: map[string]string{
					core.LabelMetricSetType.Key: core.MetricSetTypePodContainer,
				},
				MetricValues: map[string]core.MetricValue{
					core.MetricCpuUsage.MetricDescriptor.Name: {
						ValueType:  core.ValueInt64,
						MetricType: core.MetricCumulative,
						IntValue:   948071062732,
					},
					core.MetricNetworkTxErrors.MetricDescriptor.Name: {
						ValueType:  core.ValueInt64,
						MetricType: core.MetricCumulative,
						IntValue:   120,
					},
				},
			},
		},
	}

	processor := NewRateCalculator(core.RateMetricsMapping)
	processor.Process(prev)
	processor.Process(current)

	ms := current.MetricSets[key]
	cpuRate := ms.MetricValues[core.MetricCpuUsageRate.Name]
	txeRate := ms.MetricValues[core.MetricNetworkTxErrorsRate.Name]

	assert.InEpsilon(t, 13, cpuRate.IntValue, 2)
	assert.InEpsilon(t, 2, txeRate.FloatValue, 0.1)
}
// InEpsilon asserts that expected and actual have a relative error less than epsilon.
//
// Unlike assert.InEpsilon, this wrapper does not return a bool; it fails the
// test immediately via t.FailNow if the assertion does not hold.
func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) {
	if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) {
		t.FailNow()
	}
}
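A minimal usage sketch for the wrapper above, assuming testify's relative-error semantics (epsilon bounds |expected - actual| / |expected|); the test name and values are illustrative and not part of any suite here.

// TestInEpsilonUsage is a hypothetical example of calling the require-style wrapper.
func TestInEpsilonUsage(t *testing.T) {
	// relative error is |101.0-100.0| / 100.0 = 0.01, which is within the 0.02 bound
	InEpsilon(t, 100.0, 101.0, 0.02)

	// a call with relative error above epsilon would stop the test via t.FailNow, e.g.:
	// InEpsilon(t, 100.0, 110.0, 0.02)
}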
func TestParseMonDump(t *testing.T) {
	dump, err := parseDump(monPerfDump)
	assert.NoError(t, err)
	assert.InEpsilon(t, 5678670180, (*dump)["cluster"]["osd_kb_used"], epsilon)
	assert.InEpsilon(t, 6866.540527000, (*dump)["paxos"]["store_state_latency.sum"], epsilon)
}
func TestParseOsdDump(t *testing.T) {
	dump, err := parseDump(osdPerfDump)
	assert.NoError(t, err)
	assert.InEpsilon(t, 552132.109360000, (*dump)["filestore"]["commitcycle_interval.sum"], epsilon)
	assert.Equal(t, float64(0), (*dump)["mutex-FileJournal::finisher_lock"]["wait.avgcount"])
}
func Test_DeterministicTrigger_Time(t *testing.T) {
	trigger := triggers.NewDeterministicTrigger(1.0)
	time := trigger.Time(2.0)
	assert.InEpsilon(t, time, 3.0, 0.001)
}
func TestSumWeighted(t *testing.T) {
	result, err := mino.FromPoints(weightedFib).Transform(transform.Sum{})
	assert.Nil(t, err)
	assert.InEpsilon(t, 114.5, result.(float64), epsilon)
}
func TestAverageWeighted(t *testing.T) {
	result, err := mino.FromPoints(weightedFib).Transform(transform.Average{})
	assert.Nil(t, err)
	assert.InEpsilon(t, 8.4814, result.(float64), epsilon)
}