Example #1
func doWork() ([]source_api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	c := cache.NewCache(*argCacheDuration, time.Minute)
	sources, err := newSources(c)
	if err != nil {
		return nil, nil, nil, err
	}
	sinkManager, err := sinks.NewExternalSinkManager(nil, c, *argSinkFrequency)
	if err != nil {
		return nil, nil, nil, err
	}

	// Spawn the Model Housekeeping goroutine even if the model is not enabled.
	// This will allow the model to be activated/deactivated at runtime.
	// Set the housekeeping period to 2 * argModelResolution + 25 sec
	// TODO(afein): select a more well-defined housekeeping interval
	modelDuration := 2*(*argModelResolution) + 25*time.Second
	if (*argCacheDuration).Nanoseconds() < modelDuration.Nanoseconds() {
		modelDuration = *argCacheDuration
	}

	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration, c, *argUseModel, *argModelResolution,
		modelDuration)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := manager.SetSinkUris(argSinks); err != nil {
		return nil, nil, nil, err
	}

	manager.Start()
	return sources, sinkManager, manager, nil
}
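
A note on the clamp above: the housekeeping period is 2 * argModelResolution + 25 sec, capped at the cache duration. A hypothetical helper (not part of heapster) that factors out the same arithmetic:

func housekeepingPeriod(modelResolution, cacheDuration time.Duration) time.Duration {
	// Twice the model resolution plus a 25s margin, but never longer than
	// the cache duration, so entries are housekept before they expire.
	d := 2*modelResolution + 25*time.Second
	if cacheDuration < d {
		d = cacheDuration
	}
	return d
}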
Example #2
func doWork() ([]source_api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	c := cache.NewCache(*argCacheDuration, time.Minute)
	sources, err := newSources(c)
	if err != nil {
		// Do not fail heapster if source setup fails for any reason.
		glog.Errorf("failed to setup sources - %v", err)
	}
	sinkManager, err := sinks.NewExternalSinkManager(nil, c, *argSinkFrequency)
	if err != nil {
		return nil, nil, nil, err
	}

	// Spawn the Model Housekeeping goroutine even if the model is not enabled.
	// This will allow the model to be activated/deactivated at runtime.
	modelDuration := *argModelFrequency
	if (*argCacheDuration).Nanoseconds() < modelDuration.Nanoseconds() {
		modelDuration = *argCacheDuration
	}

	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration, c, *argUseModel, *argModelResolution,
		modelDuration)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := manager.SetSinkUris(argSinks); err != nil {
		// Do not fail heapster if sink setup fails for any reason.
		glog.Errorf("failed to setup sinks - %v", err)
	}

	manager.Start()
	return sources, sinkManager, manager, nil
}
Example #3
func doWork() ([]source_api.Source, sinks.ExternalSinkManager, manager.Manager, error) {
	c := cache.NewCache(*argCacheDuration, time.Minute)
	sources, err := newSources(c)
	if err != nil {
		return nil, nil, nil, err
	}
	sinkManager, err := sinks.NewExternalSinkManager(nil)
	if err != nil {
		return nil, nil, nil, err
	}
	manager, err := manager.NewManager(sources, sinkManager, *argStatsResolution, *argCacheDuration, c, *argUseModel, *argModelResolution, *argAlignStats)
	if err != nil {
		return nil, nil, nil, err
	}
	if err := manager.SetSinkUris(argSinks); err != nil {
		return nil, nil, nil, err
	}

	// Spawn the Model Housekeeping goroutine even if the model is not enabled.
	// This will allow the model to be activated/deactivated at runtime.
	modelDuration := 2 * *argModelResolution
	if (*argCacheDuration).Nanoseconds() < modelDuration.Nanoseconds() {
		modelDuration = *argCacheDuration
	}
	go util.Until(manager.HousekeepModel, modelDuration, util.NeverStop)

	go util.Until(manager.Housekeep, *argPollDuration, util.NeverStop)
	return sources, sinkManager, manager, nil
}
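
Both goroutines above use the util.Until pattern: run the function, sleep one period, repeat until the stop channel closes (util.NeverStop is simply a channel that is never closed). A minimal sketch of that loop, assuming those semantics:

func until(f func(), period time.Duration, stop <-chan struct{}) {
	for {
		// Bail out promptly if stop is already closed.
		select {
		case <-stop:
			return
		default:
		}
		f()
		// Wait one period, or return early when stop closes.
		select {
		case <-stop:
			return
		case <-time.After(period):
		}
	}
}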
Example #4
// TestUpdate tests the normal flows of Update.
// It performs consecutive calls to Update with both empty and non-empty caches.
func TestUpdate(t *testing.T) {
	var (
		cluster      = newRealModel(time.Minute)
		source_cache = cacheFactory()
		empty_cache  = cache.NewCache(24*time.Hour, time.Hour)
		zeroTime     = time.Time{}
		assert       = assert.New(t)
		require      = require.New(t)
	)

	// Invocation with empty cache
	assert.NoError(cluster.Update(empty_cache))
	assert.Empty(cluster.Nodes)
	assert.Empty(cluster.Namespaces)
	assert.Empty(cluster.Metrics)

	// Invocation with regular parameters
	assert.NoError(cluster.Update(source_cache))
	verifyCacheFactoryCluster(&cluster.ClusterInfo, t)

	// Assert Node Metric aggregation
	require.NotEmpty(cluster.Nodes)
	require.NotEmpty(cluster.Metrics)
	require.NotNil(cluster.Metrics[memWorking])
	mem_work_ts := *(cluster.Metrics[memWorking])
	actual := mem_work_ts.Hour.Get(zeroTime, zeroTime)
	require.Len(actual, 6)
	// Datapoint present in both nodes, added up to 1204
	assert.Equal(actual[0].Value, uint64(602+602))
	assert.Equal(actual[1].Value, 2*memWorkingEpsilon)
	assert.Equal(actual[5].Value, 2*memWorkingEpsilon)

	require.NotNil(cluster.Metrics[memUsage])
	mem_usage_ts := *(cluster.Metrics[memUsage])
	actual = mem_usage_ts.Hour.Get(zeroTime, zeroTime)
	require.Len(actual, 6)
	// Datapoint present in only one node, second node's metric is extended
	assert.Equal(actual[0].Value, uint64(10000))
	// Datapoint present in both nodes, each contributing memWorkingEpsilon
	assert.Equal(actual[1].Value, 2*memWorkingEpsilon)

	// Assert Kubernetes Metric aggregation up to namespaces
	ns := cluster.Namespaces["test"]
	mem_work_ts = *(ns.Metrics[memWorking])
	actual = mem_work_ts.Hour.Get(zeroTime, zeroTime)
	require.Len(actual, 8)
	assert.Equal(actual[0].Value, uint64(2408))

	// Invocation with no fresh data - expect no change in cluster
	assert.NoError(cluster.Update(source_cache))
	verifyCacheFactoryCluster(&cluster.ClusterInfo, t)

	// Invocation with empty cache - expect no change in cluster
	assert.NoError(cluster.Update(empty_cache))
	verifyCacheFactoryCluster(&cluster.ClusterInfo, t)
}
Example #5
func TestSyncLastUpdated(t *testing.T) {
	as := assert.New(t)
	s1 := &DummySink{}
	c := cache.NewCache(time.Hour, time.Minute)
	m, err := newExternalSinkManager([]sink_api.ExternalSink{s1}, c, time.Microsecond)
	as.Nil(err)
	var (
		pods                                        []source_api.Pod
		containers                                  []source_api.Container
		events                                      []*cache.Event
		expectedESync, expectedPSync, expectedNSync time.Time
	)
	f := fuzz.New().NumElements(10, 10).NilChance(0)
	f.Fuzz(&pods)
	now := time.Now()
	for pidx := range pods {
		for cidx := range pods[pidx].Containers {
			for sidx := range pods[pidx].Containers[cidx].Stats {
				ts := now.Add(time.Duration(sidx) * time.Minute)
				pods[pidx].Containers[cidx].Stats[sidx].Timestamp = ts
				expectedPSync = hUtil.GetLatest(expectedPSync, ts)
			}
		}
	}
	f.Fuzz(&containers)
	for cidx := range containers {
		for sidx := range containers[cidx].Stats {
			ts := now.Add(time.Duration(sidx) * time.Minute)
			containers[cidx].Stats[sidx].Timestamp = ts
			expectedNSync = hUtil.GetLatest(expectedNSync, ts)
		}
	}
	f.Fuzz(&events)
	for eidx := range events {
		ts := now.Add(time.Duration(eidx) * time.Minute)
		events[eidx].LastUpdate = ts
		events[eidx].UID = fmt.Sprintf("id:%d", eidx)
		expectedESync = hUtil.GetLatest(expectedESync, ts)
	}
	err = c.StorePods(pods)
	if err != nil {
		glog.Fatalf("Failed to store pods %v", err)
	}
	err = c.StoreContainers(containers)
	if err != nil {
		glog.Fatalf("Failed to store containers %v", err)
	}
	err = c.StoreEvents(events)
	if err != nil {
		glog.Fatalf("Failed to store events %v", err)
	}
	m.store()
	as.Equal(m.lastSync.eventsSync, expectedESync, "Event now: %v, eventSync: %v, expected: %v", now, m.lastSync.eventsSync, expectedESync)
	as.Equal(m.lastSync.podSync, expectedPSync, "Pod now: %v, podSync: %v, expected: %v", now, m.lastSync.podSync, expectedPSync)
	as.Equal(m.lastSync.nodeSync, expectedNSync, "Node now: %v, nodeSync: %v, expected: %v", now, m.lastSync.nodeSync, expectedNSync)
}
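
hUtil.GetLatest is used above to track the newest timestamp per category; a plausible sketch of that helper, assuming it simply returns the later of two times:

// GetLatest returns the later of two timestamps (sketch; the actual
// heapster util implementation may differ).
func GetLatest(a, b time.Time) time.Time {
	if a.Before(b) {
		return b
	}
	return a
}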
Example #6
// TestUpdate tests the normal flows of Update.
// It performs consecutive calls to Update with both empty and non-empty caches.
func TestUpdate(t *testing.T) {
	var (
		cluster      = newRealCluster(newTimeStore, time.Minute)
		source_cache = cacheFactory()
		assert       = assert.New(t)
		empty_cache  = cache.NewCache(24*time.Hour, time.Hour)
		zeroTime     = time.Time{}
	)

	// Invocation with empty cache
	assert.NoError(cluster.Update(empty_cache))
	assert.Empty(cluster.Nodes)
	assert.Empty(cluster.Namespaces)
	assert.Empty(cluster.Metrics)

	// Invocation with regular parameters
	assert.NoError(cluster.Update(source_cache))
	verifyCacheFactoryCluster(&cluster.ClusterInfo, t)

	// Assert Node Metric aggregation
	assert.NotEmpty(cluster.Nodes)
	assert.NotEmpty(cluster.Metrics)
	assert.NotNil(cluster.Metrics[memWorking])
	mem_work_ts := *(cluster.Metrics[memWorking])
	actual := mem_work_ts.Get(zeroTime, zeroTime)
	assert.Len(actual, 2)
	// Datapoint present in both nodes, added up to 1204
	assert.Equal(actual[1].Value.(uint64), uint64(1204))
	// Datapoint present in only one node
	assert.Equal(actual[0].Value.(uint64), uint64(602))

	assert.NotNil(cluster.Metrics[memUsage])
	mem_usage_ts := *(cluster.Metrics[memUsage])
	actual = mem_usage_ts.Get(zeroTime, zeroTime)
	assert.Len(actual, 2)
	// Datapoint present in both nodes, added up to 10000
	assert.Equal(actual[1].Value.(uint64), uint64(10000))
	// Datapoint present in only one node
	assert.Equal(actual[0].Value.(uint64), uint64(5000))

	// Assert Kubernetes Metric aggregation up to namespaces
	ns := cluster.Namespaces["test"]
	mem_work_ts = *(ns.Metrics[memWorking])
	actual = mem_work_ts.Get(zeroTime, zeroTime)
	assert.Len(actual, 1)
	assert.Equal(actual[0].Value.(uint64), uint64(2408))

	// Invocation with no fresh data - expect no change in cluster
	assert.NoError(cluster.Update(source_cache))
	verifyCacheFactoryCluster(&cluster.ClusterInfo, t)

	// Invocation with empty cache - expect no change in cluster
	assert.NoError(cluster.Update(empty_cache))
	verifyCacheFactoryCluster(&cluster.ClusterInfo, t)
}
Example #7
func TestEventsBasic(t *testing.T) {
	handler := util.FakeHandler{
		StatusCode:   200,
		RequestBody:  "something",
		ResponseBody: body(&kube_api.EventList{}),
		T:            t,
	}
	server := httptest.NewServer(&handler)
	defer server.Close()
	client := client.NewOrDie(&client.Config{Host: server.URL, Version: testapi.Version()})
	cache := cache.NewCache(time.Hour, time.Hour)
	source := NewKubeEvents(client, cache)
	_, err := source.GetInfo(time.Now(), time.Now().Add(time.Minute), time.Second)
	require.NoError(t, err)
	require.NotEmpty(t, source.DebugInfo())
}
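
The body helper is not shown here; a minimal stand-in, assuming it serializes the API object to JSON (the real test helper may encode via the versioned codec instead):

// body JSON-encodes an API object so FakeHandler can serve it as the
// response payload (hypothetical sketch; requires encoding/json).
func body(obj interface{}) string {
	data, err := json.Marshal(obj)
	if err != nil {
		panic(err)
	}
	return string(data)
}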
Example #8
func TestSetSinksStore(t *testing.T) {
	as := assert.New(t)
	s1 := &DummySink{}
	c := cache.NewCache(time.Hour, time.Minute)
	m, err := newExternalSinkManager([]sink_api.ExternalSink{s1}, c, time.Microsecond)
	as.Nil(err)
	as.Equal(0, s1.StoredTimeseries)
	as.Equal(0, s1.StoredEvents)
	var (
		pods       []source_api.Pod
		containers []source_api.Container
		events     []*cache.Event
	)
	f := fuzz.New().NumElements(1, 1).NilChance(0)
	f.Fuzz(&pods)
	f.Fuzz(&containers)
	f.Fuzz(&events)
	c.StorePods(pods)
	c.StoreContainers(containers)
	c.StoreEvents(events)
	m.sync()
	as.Equal(1, s1.StoredTimeseries)
	as.Equal(1, s1.StoredEvents)
	err = m.SetSinks([]sink_api.ExternalSink{})
	as.Nil(err)
	m.sync()
	as.Equal(1, s1.StoredTimeseries)
	as.Equal(1, s1.StoredEvents)

	err = m.SetSinks([]sink_api.ExternalSink{s1})
	as.Equal(1, s1.StoredTimeseries)
	as.Equal(1, s1.StoredEvents)
	as.Nil(err)
	f.Fuzz(&pods)
	f.Fuzz(&containers)
	f.Fuzz(&events)
	c.StorePods(pods)
	c.StoreContainers(containers)
	c.StoreEvents(events)
	m.sync()
	time.Sleep(time.Second)
	as.Equal(2, s1.StoredTimeseries)
	as.Equal(2, s1.StoredEvents)
}
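
For reference, the gofuzz configuration above (github.com/google/gofuzz) behaves as follows: NumElements(1, 1) pins every fuzzed slice and map to exactly one element, and NilChance(0) ensures pointers are never nil, so each Fuzz call yields one fully populated object:

f := fuzz.New().NumElements(1, 1).NilChance(0)
var pods []source_api.Pod
f.Fuzz(&pods) // len(pods) == 1, all fields recursively populated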
Example #9
func TestModelMetricPassing(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping heapster model integration test.")
	}
	assert := assert.New(t)
	resolution := 2 * time.Second

	sources := []source_api.Source{newTestSource()}
	cache := cache.NewCache(time.Hour, time.Hour)
	assert.NotNil(cache)
	sinkManager, err := sinks.NewExternalSinkManager([]sink_api.ExternalSink{}, cache, resolution)
	assert.NoError(err)

	manager, err := manager.NewManager(sources, sinkManager, resolution, time.Hour, cache, true, resolution, resolution)
	assert.NoError(err)
	start := time.Now()

	manager.Start()
	defer manager.Stop()
	time.Sleep(10 * time.Second)

	model := manager.GetModel()
	pods := model.GetPods(testNamespace)
	assert.Equal(podCount, len(pods))

	metrics, _, err := model.GetPodMetric(model_api.PodMetricRequest{
		NamespaceName: testNamespace,
		PodName:       "pod-0",
		MetricRequest: model_api.MetricRequest{
			Start:      start,
			End:        time.Time{},
			MetricName: "cpu-usage",
		},
	})
	assert.NoError(err)
	// TODO: Expect more than 1 metric once #551 is fixed
	assert.NotEmpty(metrics)
	assert.InEpsilon(loadAverageMilli, metrics[0].Value, 50)
}
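
Note that testify's InEpsilon asserts a relative error bound, |expected-actual| / |expected| <= epsilon, so the epsilon of 50 above is a very loose sanity check rather than a tight tolerance:

// InEpsilon compares relative, not absolute, error:
assert.InEpsilon(100.0, 140.0, 0.5) // passes: |100-140|/|100| = 0.4 <= 0.5
assert.InEpsilon(100.0, 140.0, 0.1) // fails:  0.4 > 0.1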
Example #10
// cacheFactory generates a cache with a predetermined structure.
// The cache contains two pods, each referencing the same two containers.
// The cache also contains a free container and two "machine"-tagged containers.
func cacheFactory() cache.Cache {
	source_cache := cache.NewCache(24*time.Hour, time.Hour)

	// Generate Container CMEs - same timestamp for aggregation
	cme_1 := cmeFactory()
	cme_2 := cmeFactory()
	cme_2.Stats.Timestamp = cme_1.Stats.Timestamp

	// Generate Machine CMEs - same timestamp for aggregation
	cme_3 := cmeFactory()
	cme_4 := cmeFactory()
	cme_4.Stats.Timestamp = cme_3.Stats.Timestamp

	cme_5 := cmeFactory()
	cme_5.Stats.Timestamp = cme_4.Stats.Timestamp.Add(time.Hour)
	cme_5.Stats.Cpu.Usage.Total = cme_4.Stats.Cpu.Usage.Total + uint64(3600000000000)

	// Generate a pod with two containers, and a pod without any containers
	container1 := source_api.Container{
		Name:     "container1",
		Hostname: "hostname2",
		Spec:     *cme_1.Spec,
		Stats:    []*cadvisor.ContainerStats{cme_1.Stats},
	}
	container2 := source_api.Container{
		Name:     "container2",
		Hostname: "hostname3",
		Spec:     *cme_2.Spec,
		Stats:    []*cadvisor.ContainerStats{cme_2.Stats},
	}

	containers := []source_api.Container{container1, container2}
	pods := []source_api.Pod{
		{
			PodMetadata: source_api.PodMetadata{
				Name:      "pod1",
				ID:        "123",
				Namespace: "test",
				Hostname:  "hostname2",
				Status:    "Running",
			},
			Containers: containers,
		},
		{
			PodMetadata: source_api.PodMetadata{
				Name:      "pod2",
				ID:        "1234",
				Namespace: "test",
				Hostname:  "hostname3",
				Status:    "Running",
			},
			Containers: containers,
		},
	}

	// Generate two machine containers
	machine_container := source_api.Container{
		Name:     "/",
		Hostname: "hostname2",
		Spec:     *cme_3.Spec,
		Stats:    []*cadvisor.ContainerStats{cme_3.Stats},
	}
	machine_container2 := source_api.Container{
		Name:     "/",
		Hostname: "hostname3",
		Spec:     *cme_4.Spec,
		Stats:    []*cadvisor.ContainerStats{cme_5.Stats, cme_4.Stats},
	}
	// Generate a free container
	free_container := source_api.Container{
		Name:     "free_container1",
		Hostname: "hostname2",
		Spec:     *cme_5.Spec,
		Stats:    []*cadvisor.ContainerStats{cme_5.Stats},
	}

	other_containers := []source_api.Container{
		machine_container,
		machine_container2,
		free_container,
	}

	// Enter everything in the cache
	source_cache.StorePods(pods)
	source_cache.StoreContainers(other_containers)

	return source_cache
}
Example #11
// cacheFactory generates a cache with a predetermined structure.
// The cache contains two pods, each referencing the same two containers.
// The cache also contains a free container and two "machine"-tagged containers.
func cacheFactory() cache.Cache {
	source_cache := cache.NewCache(10*time.Minute, time.Minute)

	// Generate Container CMEs - same timestamp for aggregation
	cme_1 := cmeFactory()
	cme_2 := cmeFactory()
	cme_2.Stats.Timestamp = cme_1.Stats.Timestamp
	cme_2.Stats.Cpu.Usage.Total = cme_1.Stats.Cpu.Usage.Total

	// Generate a flush CME for cme_1 and cme_2
	cme_2flush := cmeFactory()
	cme_2flush.Stats.Timestamp = cme_1.Stats.Timestamp.Add(time.Minute)

	// Generate Machine CMEs - same timestamp for aggregation
	cme_3 := cmeFactory()
	cme_4 := cmeFactory()
	cme_3.Stats.Timestamp = cme_1.Stats.Timestamp.Add(2 * time.Minute)
	cme_4.Stats.Timestamp = cme_3.Stats.Timestamp
	cme_3.Stats.Memory.WorkingSet = 602
	cme_4.Stats.Memory.WorkingSet = 1062

	// Generate a flush CME for cme_3 and cme_4
	cme_4flush := cmeFactory()
	cme_4flush.Stats.Timestamp = cme_4.Stats.Timestamp.Add(time.Minute)
	cme_4flush.Stats.Cpu.Usage.Total = cme_4.Stats.Cpu.Usage.Total + uint64(360000000000)

	// Generate a generic container further than one resolution in the future
	cme_5 := cmeFactory()
	cme_5.Stats.Timestamp = cme_4.Stats.Timestamp.Add(4 * time.Minute)
	cme_5.Stats.Cpu.Usage.Total = cme_4.Stats.Cpu.Usage.Total + uint64(4*360000000000)

	// Generate a flush CME for cme_5 and cme_4
	cme_5flush := cmeFactory()
	cme_5flush.Stats.Timestamp = cme_5.Stats.Timestamp.Add(time.Minute)
	cme_5flush.Stats.Cpu.Usage.Total = cme_5.Stats.Cpu.Usage.Total + uint64(360000000000)

	// Generate a pod with two containers, and a pod without any containers
	container1 := source_api.Container{
		Name:     "container1",
		Hostname: "hostname2",
		Spec:     *cme_1.Spec,
		Stats:    []*source_api.ContainerStats{cme_2flush.Stats, cme_1.Stats},
	}
	container2 := source_api.Container{
		Name:     "container2",
		Hostname: "hostname3",
		Spec:     *cme_2.Spec,
		Stats:    []*source_api.ContainerStats{cme_2flush.Stats, cme_2.Stats},
	}

	containers := []source_api.Container{container1, container2}
	pods := []source_api.Pod{
		{
			PodMetadata: source_api.PodMetadata{
				Name:      "pod1",
				ID:        "123",
				Namespace: "test",
				Hostname:  "hostname2",
				Status:    "Running",
			},
			Containers: containers,
		},
		{
			PodMetadata: source_api.PodMetadata{
				Name:      "pod2",
				ID:        "1234",
				Namespace: "test",
				Hostname:  "hostname3",
				Status:    "Running",
			},
			Containers: containers,
		},
	}

	// Generate two machine containers
	machine_container := source_api.Container{
		Name:     "/",
		Hostname: "hostname2",
		Spec:     *cme_3.Spec,
		Stats:    []*source_api.ContainerStats{cme_4flush.Stats, cme_3.Stats},
	}
	machine_container2 := source_api.Container{
		Name:     "/",
		Hostname: "hostname3",
		Spec:     *cme_4.Spec,
		Stats:    []*source_api.ContainerStats{cme_5flush.Stats, cme_5.Stats, cme_4.Stats},
	}
	// Generate a free container
	free_container := source_api.Container{
		Name:     "free_container1",
		Hostname: "hostname2",
		Spec:     *cme_5.Spec,
		Stats:    []*source_api.ContainerStats{cme_5flush.Stats, cme_5.Stats},
	}

	other_containers := []source_api.Container{
		machine_container,
		machine_container2,
		free_container,
	}

	// Enter everything in the cache
	source_cache.StorePods(pods)
	source_cache.StoreContainers(other_containers)

	return source_cache
}
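
The CPU deltas in this factory encode usage rates: Cpu.Usage.Total is cumulative CPU time in nanoseconds, so a delta of 360000000000 ns across a one-minute gap is 360 s of CPU time per 60 s of wall clock, i.e. a 6-core (6000 millicore) rate. A hypothetical helper making that arithmetic explicit:

// cpuRate converts a cumulative-usage delta over a wall-clock window into
// an average number of cores in use (sketch, not part of heapster).
func cpuRate(deltaNs uint64, window time.Duration) float64 {
	return float64(deltaNs) / float64(window.Nanoseconds())
}

// cpuRate(360000000000, time.Minute) == 6.0, i.e. 6000 millicores.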
Example #12
func TestSetSinksStore(t *testing.T) {
	as := assert.New(t)
	s1 := &DummySink{}
	c := cache.NewCache(time.Hour, time.Minute)
	m, err := newExternalSinkManager([]sink_api.ExternalSink{s1}, c, time.Microsecond)
	as.Nil(err)
	as.Equal(0, s1.StoredTimeseries)
	as.Equal(0, s1.StoredEvents)
	var (
		pods       []source_api.Pod
		containers []source_api.Container
		events     []*cache.Event
	)
	f := fuzz.New().NumElements(1, 1).NilChance(0)

	time1 := time.Now()
	f.Fuzz(&pods)
	for pidx := range pods {
		for cidx := range pods[pidx].Containers {
			for sidx := range pods[pidx].Containers[cidx].Stats {
				pods[pidx].Containers[cidx].Stats[sidx].Timestamp = time1
			}
		}
	}
	f.Fuzz(&containers)
	for cidx := range containers {
		for sidx := range containers[cidx].Stats {
			containers[cidx].Stats[sidx].Timestamp = time1
		}
	}
	f.Fuzz(&events)
	for eidx := range events {
		events[eidx].LastUpdate = time1
		events[eidx].UID = fmt.Sprintf("id1:%d", eidx)
	}

	if err := c.StorePods(pods); err != nil {
		glog.Fatalf("Failed to store pods: %v", err)
	}
	if err := c.StoreContainers(containers); err != nil {
		glog.Fatalf("Failed to store containers: %v", err)
	}
	if err = c.StoreEvents(events); err != nil {
		glog.Fatalf("Failed to store events: %v", err)
	}
	m.sync()
	as.Equal(1, s1.StoredTimeseries)
	as.Equal(1, s1.StoredEvents)
	err = m.SetSinks([]sink_api.ExternalSink{})
	as.Nil(err)
	m.sync()
	as.Equal(1, s1.StoredTimeseries)
	as.Equal(1, s1.StoredEvents)
	err = m.SetSinks([]sink_api.ExternalSink{s1})
	as.Equal(1, s1.StoredTimeseries)
	as.Equal(1, s1.StoredEvents)
	as.Nil(err)

	time2 := time.Now()
	f.Fuzz(&pods)
	for pidx := range pods {
		for cidx := range pods[pidx].Containers {
			for sidx := range pods[pidx].Containers[cidx].Stats {
				pods[pidx].Containers[cidx].Stats[sidx].Timestamp = time2
			}
		}
	}
	f.Fuzz(&containers)
	for cidx := range containers {
		for sidx := range containers[cidx].Stats {
			containers[cidx].Stats[sidx].Timestamp = time2
		}
	}
	f.Fuzz(&events)
	for eidx := range events {
		events[eidx].LastUpdate = time2
		events[eidx].UID = fmt.Sprintf("id2:%d", eidx)
	}

	if err := c.StorePods(pods); err != nil {
		glog.Fatalf("Failed to store pods: %v", err)
	}
	if err := c.StoreContainers(containers); err != nil {
		glog.Fatalf("Failed to store containers: %v", err)
	}
	if err = c.StoreEvents(events); err != nil {
		glog.Fatalf("Failed to store events: %v", err)
	}
	m.sync()
	as.Equal(2, s1.StoredTimeseries)
	as.Equal(2, s1.StoredEvents)
}
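
What this test exercises is incremental sync: after the first m.sync(), only data stamped later than the recorded last-sync time should reach the sinks again. A sketch of that filtering idea, under the assumption that the manager keeps the newest pushed timestamp per category:

// newerThan is a hypothetical filter illustrating the incremental-sync
// behavior verified above: keep only stats newer than the last sync.
func newerThan(stats []*source_api.ContainerStats, lastSync time.Time) []*source_api.ContainerStats {
	var fresh []*source_api.ContainerStats
	for _, s := range stats {
		if s.Timestamp.After(lastSync) {
			fresh = append(fresh, s)
		}
	}
	return fresh
}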