Code example #1
File: utils_test.go   Project: bmanas/amazon-ecs-agent
func TestRetryWithBackoff(t *testing.T) {
	test_time := ttime.NewTestTime()
	test_time.LudicrousSpeed(true)
	ttime.SetTime(test_time)

	start := ttime.Now()

	counter := 3
	RetryWithBackoff(NewSimpleBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), func() error {
		if counter == 0 {
			return nil
		}
		counter--
		return errors.New("err")
	})
	if counter != 0 {
		t.Error("Counter didn't go to 0; didn't get retried enough")
	}
	testTime := ttime.Since(start)

	if testTime.Seconds() < .29 || testTime.Seconds() > .31 {
		t.Error("Retry didn't backoff for as long as expected")
	}

	start = ttime.Now()
	RetryWithBackoff(NewSimpleBackoff(10*time.Second, 20*time.Second, 0, 2), func() error {
		return NewRetriableError(NewRetriable(false), errors.New("can't retry"))
	})

	if ttime.Since(start).Seconds() > .1 {
		t.Error("Retry for the trivial function took too long")
	}
}
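The same helpers can be used outside of tests. Below is a minimal sketch (not code from the repository), written as if it lived in the same utils package as the tests above; it assumes the NewSimpleBackoff arguments are (min, max, jitter, multiplier) as the calls above suggest, and that RetryWithBackoff returns the final error the way RetryNWithBackoff does in example #11. The op callback stands in for any operation that may fail transiently.
func retryOperation(op func() error) error {
	attempts := 0
	// Arguments read as (min, max, jitter, multiplier), mirroring the calls above.
	backoff := NewSimpleBackoff(100*time.Millisecond, 5*time.Second, 0.2, 2)
	return RetryWithBackoff(backoff, func() error {
		attempts++
		err := op()
		if err == nil {
			return nil
		}
		if attempts >= 5 {
			// Wrapping with NewRetriable(false) marks the error as non-retriable,
			// ending the loop immediately as the second half of example #1 shows.
			return NewRetriableError(NewRetriable(false), err)
		}
		return err
	})
}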
Code example #2
func TestPullImageECRSuccess(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mockDocker := mock_dockeriface.NewMockClient(ctrl)
	mockDocker.EXPECT().Ping().AnyTimes().Return(nil)
	factory := mock_dockerclient.NewMockFactory(ctrl)
	factory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDocker, nil)
	client, _ := NewDockerGoClient(factory, "", config.NewSensitiveRawMessage([]byte{}), false)
	goClient, _ := client.(*dockerGoClient)
	ecrClientFactory := mock_ecr.NewMockECRFactory(ctrl)
	ecrClient := mock_ecr.NewMockECRSDK(ctrl)
	goClient.ecrClientFactory = ecrClientFactory
	testTime := ttime.NewTestTime()
	ttime.SetTime(testTime)

	registryId := "123456789012"
	region := "eu-west-1"
	endpointOverride := "my.endpoint"
	authData := &api.RegistryAuthenticationData{
		Type: "ecr",
		ECRAuthData: &api.ECRAuthData{
			RegistryId:       registryId,
			Region:           region,
			EndpointOverride: endpointOverride,
		},
	}
	imageEndpoint := "registry.endpoint"
	image := imageEndpoint + "/myimage:tag"
	username := "******"
	password := "******"
	dockerAuthConfiguration := docker.AuthConfiguration{
		Username:      username,
		Password:      password,
		ServerAddress: "https://" + imageEndpoint,
	}
	getAuthorizationTokenInput := &ecrapi.GetAuthorizationTokenInput{
		RegistryIds: []*string{aws.String(registryId)},
	}

	ecrClientFactory.EXPECT().GetClient(region, endpointOverride).Return(ecrClient)
	ecrClient.EXPECT().GetAuthorizationToken(getAuthorizationTokenInput).Return(
		&ecrapi.GetAuthorizationTokenOutput{
			AuthorizationData: []*ecrapi.AuthorizationData{
				&ecrapi.AuthorizationData{
					ProxyEndpoint:      aws.String("https://" + imageEndpoint),
					AuthorizationToken: aws.String(base64.StdEncoding.EncodeToString([]byte(username + ":" + password))),
				},
			},
		}, nil)

	mockDocker.EXPECT().PullImage(
		&pullImageOptsMatcher{image},
		dockerAuthConfiguration,
	).Return(nil)

	metadata := client.PullImage(image, authData)
	if metadata.Error != nil {
		t.Error("Expected pull to succeed")
	}
}
Code example #3
func dockerclientSetup(t *testing.T) (*mock_dockerclient.MockClient, *DockerGoClient, *ttime.TestTime, func()) {
	ctrl := gomock.NewController(t)
	mockDocker := mock_dockerclient.NewMockClient(ctrl)
	client := &DockerGoClient{}
	client.SetGoDockerClient(mockDocker)
	testTime := ttime.NewTestTime()
	ttime.SetTime(testTime)
	return mockDocker, client, testTime, ctrl.Finish
}
Code example #4
func dockerclientSetup(t *testing.T) (*mock_dockeriface.MockClient, *DockerGoClient, *ttime.TestTime, func()) {
	ctrl := gomock.NewController(t)
	mockDocker := mock_dockeriface.NewMockClient(ctrl)
	mockDocker.EXPECT().Ping().AnyTimes().Return(nil)
	factory := mock_dockerclient.NewMockFactory(ctrl)
	factory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDocker, nil)
	client, _ := NewDockerGoClient(factory)
	testTime := ttime.NewTestTime()
	ttime.SetTime(testTime)
	return mockDocker, client, testTime, ctrl.Finish
}
Code example #5
func setup(t *testing.T) (*gomock.Controller, ECSClient, *mock_http.MockRoundTripper) {
	ctrl := gomock.NewController(t)
	mockRoundTripper := mock_http.NewMockRoundTripper(ctrl)
	mockHttpClient := httpclient.New(1*time.Second, true)
	mockHttpClient.Transport.(httpclient.OverridableTransport).SetTransport(mockRoundTripper)
	client := NewECSClient(credentials.AnonymousCredentials, &config.Config{AWSRegion: "us-east-1"}, mockHttpClient)
	testTime := ttime.NewTestTime()
	testTime.LudicrousSpeed(true)
	ttime.SetTime(testTime)

	return ctrl, client, mockRoundTripper
}
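A hypothetical test consuming the helper above (a sketch, not repository code): every request the AWS SDK sends goes through the injected RoundTripper, so a transport-level failure can be simulated without any network access. It assumes mock_http.MockRoundTripper mocks http.RoundTripper in the usual gomock way, and takes DiscoverPollEndpoint's (string, error) signature from the ECSClient mock usage in example #14.
func TestDiscoverPollEndpointTransportError(t *testing.T) {
	ctrl, client, mockRoundTripper := setup(t)
	defer ctrl.Finish()

	// Fail every outgoing request at the transport layer; AnyTimes covers SDK retries.
	mockRoundTripper.EXPECT().RoundTrip(gomock.Any()).Return(nil, errors.New("connection refused")).AnyTimes()

	if _, err := client.DiscoverPollEndpoint("containerInstanceArn"); err == nil {
		t.Error("Expected the transport error to surface from DiscoverPollEndpoint")
	}
}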
Code example #6
func dockerclientSetup(t *testing.T) (*mock_dockeriface.MockClient, *dockerGoClient, *ttime.TestTime, func()) {
	ctrl := gomock.NewController(t)
	mockDocker := mock_dockeriface.NewMockClient(ctrl)
	mockDocker.EXPECT().Ping().AnyTimes().Return(nil)
	factory := mock_dockerclient.NewMockFactory(ctrl)
	factory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDocker, nil)
	client, _ := NewDockerGoClient(factory, "", config.NewSensitiveRawMessage([]byte{}), false)
	goClient, _ := client.(*dockerGoClient)
	ecrClientFactory := mock_ecr.NewMockECRFactory(ctrl)
	goClient.ecrClientFactory = ecrClientFactory
	testTime := ttime.NewTestTime()
	ttime.SetTime(testTime)
	return mockDocker, goClient, testTime, ctrl.Finish
}
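A hypothetical unit test consuming the dockerclientSetup helpers above (a sketch, not repository code): defer the returned ctrl.Finish, optionally put the injected clock into LudicrousSpeed so internal waits collapse, and verify that an error from the mocked dockeriface client surfaces through the dockerGoClient. RemoveContainer's single error return is inferred from the engine mock usage in example #15; the exact error wrapping is not assumed.
func TestRemoveContainerError(t *testing.T) {
	mockDocker, goClient, testTime, done := dockerclientSetup(t)
	defer done()
	testTime.LudicrousSpeed(true) // make any timeout sleeps return immediately

	mockDocker.EXPECT().RemoveContainer(gomock.Any()).Return(errors.New("removal failed"))
	if err := goClient.RemoveContainer("containerId"); err == nil {
		t.Error("Expected the mocked removal error to propagate")
	}
}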
Code example #7
func TestStopWithPendingStops(t *testing.T) {
	ctrl, client, taskEngine := mocks(t, &config.Config{})
	defer ctrl.Finish()
	ttime.SetTime(dte_test_time)

	sleepTask1 := testdata.LoadTask("sleep5")
	sleepTask1.StartSequenceNumber = 5
	sleepTask2 := testdata.LoadTask("sleep5")
	sleepTask2.Arn = "arn2"

	eventStream := make(chan DockerContainerChangeEvent)

	client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)

	err := taskEngine.Init()
	if err != nil {
		t.Fatal(err)
	}
	taskEvents, contEvents := taskEngine.TaskEvents()
	go func() {
		for {
			<-taskEvents
		}
	}()
	go func() {
		for {
			<-contEvents
		}
	}()

	pulling := make(chan bool)
	client.EXPECT().PullImage(gomock.Any(), nil).Do(func(x, y interface{}) {
		<-pulling
	})
	taskEngine.AddTask(sleepTask2)
	stopSleep2 := *sleepTask2
	stopSleep2.DesiredStatus = api.TaskStopped
	stopSleep2.StopSequenceNumber = 4
	taskEngine.AddTask(&stopSleep2)

	taskEngine.AddTask(sleepTask1)
	stopSleep1 := *sleepTask1
	stopSleep1.DesiredStatus = api.TaskStopped
	stopSleep1.StopSequenceNumber = 5
	taskEngine.AddTask(&stopSleep1)
	pulling <- true
	// If we get here without deadlocking, we passed the test
}
Code example #8
func setup(t *testing.T) TaskEngine {
	if testing.Short() {
		t.Skip("Skipping integ test in short mode")
	}
	if _, err := os.Stat("/var/run/docker.sock"); err != nil {
		t.Skip("Docker not running")
	}
	if os.Getenv("ECS_SKIP_ENGINE_INTEG_TEST") != "" {
		t.Skip("ECS_SKIP_ENGINE_INTEG_TEST")
	}
	initOnce.Do(func() {
		taskEngine.Init()
	})
	ttime.SetTime(test_time)
	return taskEngine
}
Code example #9
func TestErrorPropogatesUp(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockGetter := mock_ec2.NewMockHttpClient(ctrl)
	testClient := ec2.NewEC2MetadataClient(mockGetter)
	testTime := ttime.NewTestTime()
	testTime.LudicrousSpeed(true)
	ttime.SetTime(testTime)

	mockGetter.EXPECT().Get(ec2.EC2_METADATA_SERVICE_URL+ec2.INSTANCE_IDENTITY_DOCUMENT_RESOURCE).Return(nil, errors.New("Something broke")).AnyTimes()

	_, err := testClient.InstanceIdentityDocument()
	if err == nil {
		t.Fatal("Expected error to result")
	}
}
Code example #10
func setup(t *testing.T) (TaskEngine, func()) {
	if testing.Short() {
		t.Skip("Skipping integ test in short mode")
	}
	if _, err := os.Stat("/var/run/docker.sock"); err != nil {
		t.Skip("Docker not running")
	}
	if os.Getenv("ECS_SKIP_ENGINE_INTEG_TEST") != "" {
		t.Skip("ECS_SKIP_ENGINE_INTEG_TEST")
	}
	taskEngine := NewDockerTaskEngine(cfg)
	taskEngine.Init()
	ttime.SetTime(test_time)
	return taskEngine, func() {
		taskEngine.Shutdown()
	}
}
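A hypothetical integration test consuming the helper above (a sketch, not repository code): the second return value shuts the engine down, so it is deferred immediately. Version's (string, error) signature follows the MockTaskEngine expectations in examples #14 and #18.
func TestEngineVersion(t *testing.T) {
	taskEngine, done := setup(t)
	defer done()

	version, err := taskEngine.Version()
	if err != nil {
		t.Fatal("Expected to be able to read the Docker version", err)
	}
	t.Logf("Engine reports: %v", version)
}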
Code example #11
File: utils_test.go   Project: bmanas/amazon-ecs-agent
func TestRetryNWithBackoff(t *testing.T) {
	test_time := ttime.NewTestTime()
	test_time.LudicrousSpeed(true)
	ttime.SetTime(test_time)

	start := ttime.Now()

	counter := 3
	err := RetryNWithBackoff(NewSimpleBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), 2, func() error {
		counter--
		return errors.New("err")
	})
	if counter != 1 {
		t.Error("Should have stopped after two tries")
	}
	if err == nil {
		t.Error("Should have returned appropriate error")
	}
	testTime := ttime.Since(start)
	// Expect that it tried twice, sleeping once between them
	if testTime.Seconds() < 0.09 || testTime.Seconds() > 0.11 {
		t.Errorf("Retry didn't backoff for as long as expected: %v", testTime.Seconds())
	}

	start = ttime.Now()
	counter = 3
	err = RetryNWithBackoff(NewSimpleBackoff(100*time.Millisecond, 100*time.Millisecond, 0, 1), 5, func() error {
		counter--
		if counter == 0 {
			return nil
		}
		return errors.New("err")
	})
	testTime = ttime.Since(start)
	if counter != 0 {
		t.Errorf("Counter expected to be 0, was %v", counter)
	}
	if err != nil {
		t.Errorf("Expected no error, got %v", err)
	}
	// 3 tries; 2 backoffs
	if testTime.Seconds() < 0.190 || testTime.Seconds() > 0.210 {
		t.Errorf("Retry didn't backoff for as long as expected: %v", testTime.Seconds())
	}
}
Code example #12
func TestPullImageECRAuthFail(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mockDocker := mock_dockeriface.NewMockClient(ctrl)
	mockDocker.EXPECT().Ping().AnyTimes().Return(nil)
	factory := mock_dockerclient.NewMockFactory(ctrl)
	factory.EXPECT().GetDefaultClient().AnyTimes().Return(mockDocker, nil)
	client, _ := NewDockerGoClient(factory, "", config.NewSensitiveRawMessage([]byte{}), false)
	goClient, _ := client.(*dockerGoClient)
	ecrClientFactory := mock_ecr.NewMockECRFactory(ctrl)
	ecrClient := mock_ecr.NewMockECRSDK(ctrl)
	goClient.ecrClientFactory = ecrClientFactory
	testTime := ttime.NewTestTime()
	ttime.SetTime(testTime)

	registryId := "123456789012"
	region := "eu-west-1"
	endpointOverride := "my.endpoint"
	authData := &api.RegistryAuthenticationData{
		Type: "ecr",
		ECRAuthData: &api.ECRAuthData{
			RegistryId:       registryId,
			Region:           region,
			EndpointOverride: endpointOverride,
		},
	}
	imageEndpoint := "registry.endpoint"
	image := imageEndpoint + "/myimage:tag"

	ecrClientFactory.EXPECT().GetClient(region, endpointOverride).Return(ecrClient)
	ecrClient.EXPECT().GetAuthorizationToken(gomock.Any()).Return(&ecrapi.GetAuthorizationTokenOutput{}, errors.New("test error"))

	metadata := client.PullImage(image, authData)
	if metadata.Error == nil {
		t.Error("Expected pull to fail")
	}
}
Code example #13
func TestRetriesOnError(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	mockGetter := mock_ec2.NewMockHttpClient(ctrl)
	testClient := ec2.NewEC2MetadataClient(mockGetter)
	testTime := ttime.NewTestTime()
	testTime.LudicrousSpeed(true)
	ttime.SetTime(testTime)

	gomock.InOrder(
		mockGetter.EXPECT().Get(ec2.EC2_METADATA_SERVICE_URL+ec2.INSTANCE_IDENTITY_DOCUMENT_RESOURCE).Return(nil, errors.New("Something broke")),
		mockGetter.EXPECT().Get(ec2.EC2_METADATA_SERVICE_URL+ec2.INSTANCE_IDENTITY_DOCUMENT_RESOURCE).Return(testErrorResponse()),
		mockGetter.EXPECT().Get(ec2.EC2_METADATA_SERVICE_URL+ec2.INSTANCE_IDENTITY_DOCUMENT_RESOURCE).Return(testSuccessResponse(testInstanceIdentityDoc)),
	)

	doc, err := testClient.InstanceIdentityDocument()
	if err != nil {
		t.Fatal("Expected to be able to get doc")
	}
	if doc.Region != "us-east-1" {
		t.Error("Wrong region; expected us-east-1 but got " + doc.Region)
	}
}
Code example #14
func TestHeartbeatOnlyWhenIdle(t *testing.T) {
	testTime := ttime.NewTestTime()
	ttime.SetTime(testTime)
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	taskEngine := engine.NewMockTaskEngine(ctrl)
	ecsclient := mock_api.NewMockECSClient(ctrl)
	statemanager := statemanager.NewNoopStateManager()

	closeWS := make(chan bool)
	server, serverIn, requestsChan, errChan, err := startMockAcsServer(t, closeWS)
	defer close(serverIn)

	go func() {
		for {
			<-requestsChan
		}
	}()
	if err != nil {
		t.Fatal(err)
	}

	// We're testing that it does not reconnect here; DiscoverPollEndpoint must be called exactly once
	ecsclient.EXPECT().DiscoverPollEndpoint("myArn").Return(server.URL, nil).Times(1)
	taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes()

	ctx, cancel := context.WithCancel(context.Background())
	ended := make(chan bool, 1)
	go func() {
		handler.StartSession(ctx, handler.StartSessionArguments{
			ContainerInstanceArn: "myArn",
			CredentialProvider:   credentials.AnonymousCredentials,
			Config:               &config.Config{Cluster: "someCluster"},
			TaskEngine:           taskEngine,
			ECSClient:            ecsclient,
			StateManager:         statemanager,
			AcceptInvalidCert:    true,
		})
		ended <- true
	}()

	taskAdded := make(chan bool)
	taskEngine.EXPECT().AddTask(gomock.Any()).Do(func(interface{}) {
		taskAdded <- true
	}).Times(10)
	for i := 0; i < 10; i++ {
		serverIn <- samplePayloadMessage
		testTime.Warp(1 * time.Minute)
		<-taskAdded
	}

	select {
	case <-ended:
		t.Fatal("Should not have stop session")
	case err := <-errChan:
		t.Fatal("Error should not have been returned from server", err)
	default:
	}
	go server.Close()
	cancel()
	<-ended
}
Code example #15
func TestBatchContainerHappyPath(t *testing.T) {
	ctrl, client, taskEngine := mocks(t, &defaultConfig)
	defer ctrl.Finish()
	ttime.SetTime(dte_test_time)

	sleepTask := testdata.LoadTask("sleep5")

	eventStream := make(chan DockerContainerChangeEvent)
	eventsReported := sync.WaitGroup{}

	dockerEvent := func(status api.ContainerStatus) DockerContainerChangeEvent {
		meta := DockerContainerMetadata{
			DockerId: "containerId",
		}
		return DockerContainerChangeEvent{Status: status, DockerContainerMetadata: meta}
	}

	client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
	for _, container := range sleepTask.Containers {
		client.EXPECT().PullImage(container.Image, nil).Return(DockerContainerMetadata{})

		dockerConfig, err := sleepTask.DockerConfig(container)
		if err != nil {
			t.Fatal(err)
		}
		client.EXPECT().CreateContainer(dockerConfig, gomock.Any(), gomock.Any()).Do(func(x, y, z interface{}) {
			eventsReported.Add(1)
			go func() {
				eventStream <- dockerEvent(api.ContainerCreated)
				eventsReported.Done()
			}()
		}).Return(DockerContainerMetadata{DockerId: "containerId"})

		client.EXPECT().StartContainer("containerId").Do(func(id string) {
			eventsReported.Add(1)
			go func() {
				eventStream <- dockerEvent(api.ContainerRunning)
				eventsReported.Done()
			}()
		}).Return(DockerContainerMetadata{DockerId: "containerId"})
	}

	err := taskEngine.Init()
	taskEvents, contEvents := taskEngine.TaskEvents()
	if err != nil {
		t.Fatal(err)
	}

	taskEngine.AddTask(sleepTask)

	if (<-contEvents).Status != api.ContainerRunning {
		t.Fatal("Expected container to run first")
	}
	if (<-taskEvents).Status != api.TaskRunning {
		t.Fatal("And then task")
	}
	select {
	case <-taskEvents:
		t.Fatal("Should be out of events")
	case <-contEvents:
		t.Fatal("Should be out of events")
	default:
	}
	eventsReported.Wait()
	// Wait for all events to be consumed before moving the task towards stopped;
	// otherwise the steps below can race with them and trigger the "going
	// backwards in state" stop, which no expectation has been set up for

	exitCode := 0
	// And then docker reports that sleep died, as sleep is wont to do
	eventStream <- DockerContainerChangeEvent{Status: api.ContainerStopped, DockerContainerMetadata: DockerContainerMetadata{DockerId: "containerId", ExitCode: &exitCode}}

	cont := <-contEvents
	if cont.Status != api.ContainerStopped {
		t.Fatal("Expected container to stop first")
	}
	if *cont.ExitCode != 0 {
		t.Fatal("Exit code should be present")
	}
	if (<-taskEvents).Status != api.TaskStopped {
		t.Fatal("And then task")
	}

	// Extra events should not block forever; duplicate acs and docker events are possible
	go func() { eventStream <- dockerEvent(api.ContainerStopped) }()
	go func() { eventStream <- dockerEvent(api.ContainerStopped) }()

	sleepTaskStop := testdata.LoadTask("sleep5")
	sleepTaskStop.DesiredStatus = api.TaskStopped
	taskEngine.AddTask(sleepTaskStop)
	// As above, duplicate events should not be a problem
	taskEngine.AddTask(sleepTaskStop)
	taskEngine.AddTask(sleepTaskStop)

	// Expect a bunch of steady state 'poll' describes when we warp 4 hours
	client.EXPECT().DescribeContainer(gomock.Any()).AnyTimes()
	client.EXPECT().RemoveContainer("containerId").Return(nil)

	dte_test_time.Warp(4 * time.Hour)
	go func() { eventStream <- dockerEvent(api.ContainerStopped) }()

	// Wait for the task to actually be dead; if we fall through immediately,
	// the remove might not have happened yet (expectation failure)
	for {
		tasks, _ := taskEngine.(*DockerTaskEngine).ListTasks()
		if len(tasks) == 0 {
			break
		}
		time.Sleep(5 * time.Millisecond)
	}
}
Code example #16
func TestSteadyStatePoll(t *testing.T) {
	ctrl, client, taskEngine := mocks(t, &config.Config{})
	defer ctrl.Finish()
	ttime.SetTime(dte_test_time)

	sleepTask := testdata.LoadTask("sleep5")

	eventStream := make(chan DockerContainerChangeEvent)

	dockerEvent := func(status api.ContainerStatus) DockerContainerChangeEvent {
		meta := DockerContainerMetadata{
			DockerId: "containerId",
		}
		return DockerContainerChangeEvent{Status: status, DockerContainerMetadata: meta}
	}

	client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
	for _, container := range sleepTask.Containers {

		client.EXPECT().PullImage(container.Image, nil).Return(DockerContainerMetadata{})

		dockerConfig, err := sleepTask.DockerConfig(container)
		if err != nil {
			t.Fatal(err)
		}
		client.EXPECT().CreateContainer(dockerConfig, gomock.Any(), gomock.Any()).Do(func(x, y, z interface{}) {
			go func() { eventStream <- dockerEvent(api.ContainerCreated) }()
		}).Return(DockerContainerMetadata{DockerId: "containerId"})

		client.EXPECT().StartContainer("containerId").Do(func(id string) {
			go func() { eventStream <- dockerEvent(api.ContainerRunning) }()
		}).Return(DockerContainerMetadata{DockerId: "containerId"})
	}

	err := taskEngine.Init()
	taskEvents, contEvents := taskEngine.TaskEvents()
	if err != nil {
		t.Fatal(err)
	}

	taskEngine.AddTask(sleepTask)

	if (<-contEvents).Status != api.ContainerRunning {
		t.Fatal("Expected container to run first")
	}
	if (<-taskEvents).Status != api.TaskRunning {
		t.Fatal("And then task")
	}
	select {
	case <-taskEvents:
		t.Fatal("Should be out of events")
	case <-contEvents:
		t.Fatal("Should be out of events")
	default:
	}

	// Two steady state oks, one stop
	gomock.InOrder(
		client.EXPECT().DescribeContainer("containerId").Return(api.ContainerRunning, DockerContainerMetadata{DockerId: "containerId"}).Times(2),
		client.EXPECT().DescribeContainer("containerId").Return(api.ContainerStopped, DockerContainerMetadata{DockerId: "containerId"}),
	)
	// Due to how the mock time works, we actually have to warp 10 minutes per
	// steady-state event. That's 10 per timeout + 10 per describe * 1 container.
	// The reason for this is the '.After' call happens regardless of whether the
	// value gets read, and the test sleep will add that time to elapsed, even
	// though in real-time-units that much time would not have elapsed.
	dte_test_time.Warp(60 * time.Minute)

	contEvent := <-contEvents
	if contEvent.Status != api.ContainerStopped {
		t.Error("Expected container to be stopped")
	}
	if (<-taskEvents).Status != api.TaskStopped {
		t.Fatal("And then task")
	}
	select {
	case <-taskEvents:
		t.Fatal("Should be out of events")
	case <-contEvents:
		t.Fatal("Should be out of events")
	default:
	}
}
Code example #17
func TestStartTimeoutThenStart(t *testing.T) {
	ctrl, client, taskEngine := mocks(t, &defaultConfig)
	defer ctrl.Finish()
	ttime.SetTime(dte_test_time)

	sleepTask := testdata.LoadTask("sleep5")

	eventStream := make(chan DockerContainerChangeEvent)

	dockerEvent := func(status api.ContainerStatus) DockerContainerChangeEvent {
		meta := DockerContainerMetadata{
			DockerId: "containerId",
		}
		return DockerContainerChangeEvent{Status: status, DockerContainerMetadata: meta}
	}

	client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
	for _, container := range sleepTask.Containers {

		client.EXPECT().PullImage(container.Image, nil).Return(DockerContainerMetadata{})

		dockerConfig, err := sleepTask.DockerConfig(container)
		if err != nil {
			t.Fatal(err)
		}
		client.EXPECT().CreateContainer(dockerConfig, gomock.Any(), gomock.Any()).Do(func(x, y, z interface{}) {
			go func() { eventStream <- dockerEvent(api.ContainerCreated) }()
		}).Return(DockerContainerMetadata{DockerId: "containerId"})

		client.EXPECT().StartContainer("containerId").Return(DockerContainerMetadata{Error: &DockerTimeoutError{}})

		// Expect it to try to stop the container before going on;
		// in the future the agent might optimize to not stop unless the known
		// status is running, at which point this can be safely removed
		client.EXPECT().StopContainer("containerId").Return(DockerContainerMetadata{Error: errors.New("Cannot start")})
	}

	err := taskEngine.Init()
	taskEvents, contEvents := taskEngine.TaskEvents()
	if err != nil {
		t.Fatal(err)
	}

	taskEngine.AddTask(sleepTask)

	// Expect it to go to stopped
	contEvent := <-contEvents
	if contEvent.Status != api.ContainerStopped {
		t.Fatal("Expected container to timeout on start and stop")
	}
	*contEvent.SentStatus = api.ContainerStopped

	taskEvent := <-taskEvents
	if taskEvent.Status != api.TaskStopped {
		t.Fatal("And then task")
	}
	*taskEvent.SentStatus = api.TaskStopped
	select {
	case <-taskEvents:
		t.Fatal("Should be out of events")
	case <-contEvents:
		t.Fatal("Should be out of events")
	default:
	}

	// Expect it to try to stop it once now
	client.EXPECT().StopContainer("containerId").Return(DockerContainerMetadata{Error: errors.New("Cannot start")})
	// Now surprise surprise, it actually did start!
	eventStream <- dockerEvent(api.ContainerRunning)

	// However, if it starts again, we should not see it be killed; no additional expect
	eventStream <- dockerEvent(api.ContainerRunning)
	eventStream <- dockerEvent(api.ContainerRunning)

	select {
	case <-taskEvents:
		t.Fatal("Should be out of events")
	case ev := <-contEvents:
		t.Fatal("Should be out of events", ev)
	default:
	}
}
Code example #18
func TestHandlerDoesntLeakGouroutines(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	taskEngine := engine.NewMockTaskEngine(ctrl)
	ecsclient := mock_api.NewMockECSClient(ctrl)
	statemanager := statemanager.NewNoopStateManager()
	testTime := ttime.NewTestTime()
	ttime.SetTime(testTime)

	closeWS := make(chan bool)
	server, serverIn, requests, errs, err := startMockAcsServer(t, closeWS)
	if err != nil {
		t.Fatal(err)
	}
	go func() {
		for {
			select {
			case <-requests:
			case <-errs:
			}
		}
	}()

	timesConnected := 0
	ecsclient.EXPECT().DiscoverPollEndpoint("myArn").Return(server.URL, nil).AnyTimes().Do(func(_ interface{}) {
		timesConnected++
	})
	taskEngine.EXPECT().Version().Return("Docker: 1.5.0", nil).AnyTimes()
	taskEngine.EXPECT().AddTask(gomock.Any()).AnyTimes()

	ctx, cancel := context.WithCancel(context.Background())
	ended := make(chan bool, 1)
	go func() {
		handler.StartSession(ctx, handler.StartSessionArguments{"myArn", credentials.AnonymousCredentials, &config.Config{Cluster: "someCluster"}, taskEngine, ecsclient, statemanager, true})
		ended <- true
	}()
	// Warm it up
	serverIn <- `{"type":"HeartbeatMessage","message":{"healthy":true}}`
	serverIn <- samplePayloadMessage

	beforeGoroutines := runtime.NumGoroutine()
	for i := 0; i < 100; i++ {
		serverIn <- `{"type":"HeartbeatMessage","message":{"healthy":true}}`
		serverIn <- samplePayloadMessage
		closeWS <- true
	}

	cancel()
	testTime.Cancel()
	<-ended

	afterGoroutines := runtime.NumGoroutine()

	t.Logf("Gorutines after 1 and after 100 acs messages: %v and %v", beforeGoroutines, afterGoroutines)

	if timesConnected < 50 {
		t.Fatal("Expected times connected to be a large number, was ", timesConnected)
	}
	if afterGoroutines > beforeGoroutines+5 {
		t.Error("Goroutine leak, oh no!")
		pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
	}

}
Code example #19
func TestBatchContainerHappyPath(t *testing.T) {
	ctrl, client, taskEngine := mocks(t, &config.Config{})
	defer ctrl.Finish()
	ttime.SetTime(test_time)

	sleepTask := testdata.LoadTask("sleep5")

	eventStream := make(chan engine.DockerContainerChangeEvent)

	dockerEvent := func(status api.ContainerStatus) engine.DockerContainerChangeEvent {
		meta := engine.DockerContainerMetadata{
			DockerId: "containerId",
		}
		return engine.DockerContainerChangeEvent{Status: status, DockerContainerMetadata: meta}
	}

	client.EXPECT().ContainerEvents(gomock.Any()).Return(eventStream, nil)
	for _, container := range sleepTask.Containers {

		client.EXPECT().PullImage(container.Image).Return(engine.DockerContainerMetadata{})

		dockerConfig, err := sleepTask.DockerConfig(container)
		if err != nil {
			t.Fatal(err)
		}
		client.EXPECT().CreateContainer(dockerConfig, gomock.Any(), gomock.Any()).Do(func(x, y, z interface{}) {
			go func() { eventStream <- dockerEvent(api.ContainerCreated) }()
		}).Return(engine.DockerContainerMetadata{DockerId: "containerId"})

		client.EXPECT().StartContainer("containerId").Do(func(id string) {
			go func() { eventStream <- dockerEvent(api.ContainerRunning) }()
		}).Return(engine.DockerContainerMetadata{DockerId: "containerId"})
	}

	err := taskEngine.Init()
	taskEvents, contEvents := taskEngine.TaskEvents()
	if err != nil {
		t.Fatal(err)
	}

	taskEngine.AddTask(sleepTask)

	if (<-contEvents).Status != api.ContainerRunning {
		t.Fatal("Expected container to run first")
	}
	if (<-taskEvents).Status != api.TaskRunning {
		t.Fatal("And then task")
	}
	select {
	case <-taskEvents:
		t.Fatal("Should be out of events")
	case <-contEvents:
		t.Fatal("Should be out of events")
	default:
	}

	exitCode := 0
	// And then docker reports that sleep died, as sleep is wont to do
	eventStream <- engine.DockerContainerChangeEvent{Status: api.ContainerStopped, DockerContainerMetadata: engine.DockerContainerMetadata{DockerId: "containerId", ExitCode: &exitCode}}

	cont := <-contEvents
	if cont.Status != api.ContainerStopped {
		t.Fatal("Expected container to stop first")
	}
	if *cont.ExitCode != 0 {
		t.Fatal("Exit code should be present")
	}
	if (<-taskEvents).Status != api.TaskStopped {
		t.Fatal("And then task")
	}

	// Extra events should not block forever; duplicate acs and docker events are possible
	go func() { eventStream <- dockerEvent(api.ContainerStopped) }()
	go func() { eventStream <- dockerEvent(api.ContainerStopped) }()

	sleepTaskStop := testdata.LoadTask("sleep5")
	sleepTaskStop.DesiredStatus = api.TaskStopped
	taskEngine.AddTask(sleepTaskStop)
	// As above, duplicate events should not be a problem
	taskEngine.AddTask(sleepTaskStop)
	taskEngine.AddTask(sleepTaskStop)

	// Expect a bunch of steady state 'poll' describes when we warp 4 hours
	client.EXPECT().DescribeContainer(gomock.Any()).AnyTimes()
	client.EXPECT().RemoveContainer("containerId").Return(nil)

	test_time.Warp(4 * time.Hour)
	go func() { eventStream <- dockerEvent(api.ContainerStopped) }()

	for {
		tasks, _ := taskEngine.(*engine.DockerTaskEngine).ListTasks()
		if len(tasks) == 0 {
			break
		}
		time.Sleep(5 * time.Millisecond)
	}
}