Example #1
// Fail reports a failure through the provided TestingT and then stops test execution immediately via FailNow.
func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) {
	if !assert.Fail(t, failureMessage, msgAndArgs...) {
		t.FailNow()
	}
}
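Fail is the require-style wrapper around testify's assert.Fail: the assertion records the failure message, and t.FailNow aborts the test immediately so later code can't run on bad state. A minimal usage sketch follows; loadConfig and exampleConfig are hypothetical stand-ins for illustration, not part of testify:

package example

import "testing"

// exampleConfig is a hypothetical type used only for this sketch.
type exampleConfig struct{ Port int }

// loadConfig is a hypothetical helper standing in for real config parsing.
func loadConfig(path string) (*exampleConfig, error) {
	return &exampleConfig{Port: 8080}, nil
}

func TestLoadConfig(t *testing.T) {
	cfg, err := loadConfig("testdata/config.yaml")
	if err != nil {
		// Records the failure and calls t.FailNow, so the nil cfg
		// below is never dereferenced.
		Fail(t, "could not load config", err)
	}
	if cfg.Port == 0 {
		Fail(t, "config is missing a port")
	}
}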
Example #2
func TestResolve(t *testing.T) {
	assert := assert.New(t)
	//sous.Log.Vomit.SetOutput(os.Stderr)
	sous.Log.Debug.SetOutput(os.Stderr)

	ResetSingularity()
	defer ResetSingularity()

	clusterDefs := sous.Defs{
		Clusters: sous.Clusters{
			"test-cluster": &sous.Cluster{
				BaseURL: SingularityURL,
			},
		},
	}
	repoOne := "github.com/opentable/one"
	repoTwo := "github.com/opentable/two"
	repoThree := "github.com/opentable/three"

	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()

	db := newInMemoryDB("testresolve")

	nc := docker.NewNameCache("", drc, db)

	stateOneTwo := sous.State{
		Defs: clusterDefs,
		Manifests: sous.NewManifests(
			manifest(nc, "opentable/one", "test-one", repoOne, "1.1.1"),
			manifest(nc, "opentable/two", "test-two", repoTwo, "1.1.1"),
		),
	}
	deploymentsOneTwo, err := stateOneTwo.Deployments()
	if err != nil {
		t.Fatal(err)
	}
	stateTwoThree := sous.State{
		Defs: clusterDefs,
		Manifests: sous.NewManifests(
			manifest(nc, "opentable/two", "test-two", repoTwo, "1.1.1"),
			manifest(nc, "opentable/three", "test-three", repoThree, "1.1.1"),
		),
	}
	deploymentsTwoThree, err := stateTwoThree.Deployments()
	if err != nil {
		t.Fatal(err)
	}

	// ****
	log.Print("Resolving from nothing to one+two")
	client := singularity.NewRectiAgent()
	deployer := singularity.NewDeployer(client)

	r := sous.NewResolver(deployer, nc, &sous.ResolveFilter{})

	err = r.Resolve(deploymentsOneTwo, clusterDefs.Clusters)
	if err != nil {
		assert.Fail(err.Error())
	}
	// ****
	time.Sleep(3 * time.Second)

	clusters := []string{"test-cluster"}
	ds, which := deploymentWithRepo(clusters, nc, assert, deployer, repoOne)
	deps := ds.Snapshot()
	if assert.NotEqual(none, which, "opentable/one not successfully deployed") {
		one := deps[which]
		assert.Equal(1, one.NumInstances)
	}

	which = findRepo(ds, repoTwo)
	if assert.NotEqual(none, which, "opentable/two not successfully deployed") {
		two := deps[which]
		assert.Equal(1, two.NumInstances)
	}

	// ****
	log.Println("Resolving from one+two to two+three")
	conflictRE := regexp.MustCompile(`Pending deploy already in progress`)

	// XXX Let's hope this is a temporary solution to a testing issue
	// The problem is laid out in DCOPS-7625
	for tries := 0; tries < 3; tries++ {
		client := singularity.NewRectiAgent()
		deployer := singularity.NewDeployer(client)

		r := sous.NewResolver(deployer, nc, &sous.ResolveFilter{})

		// Assign to the outer err (no ":=") so the NoError check after the
		// loop sees the final attempt's result, and break once Resolve succeeds.
		err = r.Resolve(deploymentsTwoThree, clusterDefs.Clusters)
		if err == nil {
			break
		}
		if !conflictRE.MatchString(err.Error()) {
			assert.FailNow(err.Error())
		}
		log.Printf("Singularity conflict - waiting for previous deploy to complete - try #%d", tries+1)
		time.Sleep(1 * time.Second)
	}

	if !assert.NoError(err) {
		assert.Fail(err.Error())
	}
	// ****

	ds, which = deploymentWithRepo(clusters, nc, assert, deployer, repoTwo)
	deps = ds.Snapshot()
	if assert.NotEqual(none, which, "opentable/two no longer deployed after resolve") {
		assert.Equal(1, deps[which].NumInstances)
	}

	which = findRepo(ds, repoThree)
	if assert.NotEqual(none, which, "opentable/three not successfully deployed") {
		assert.Equal(1, deps[which].NumInstances)
		if assert.Len(deps[which].DeployConfig.Volumes, 1) {
			assert.Equal("RO", string(deps[which].DeployConfig.Volumes[0].Mode))
		}
	}

	// We no longer expect any deletions; See deployer.RectifySingleDelete.
	//expectedInstances := 0
	expectedInstances := 1

	which = findRepo(ds, repoOne)
	if which != none {
		assert.Equal(expectedInstances, deps[which].NumInstances)
	}

}
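The retry loop in TestResolve is an instance of a general pattern: retry only while the error matches a known transient conflict, and fail fast on anything else. A standalone sketch of that pattern using only the standard library; retryOnConflict is our name for illustration, not part of sous:

package example

import (
	"log"
	"regexp"
	"time"
)

// retryOnConflict runs fn up to maxTries times, retrying only while the
// returned error matches conflict; any other error is returned immediately.
// This mirrors the loop in TestResolve but is not part of sous itself.
func retryOnConflict(maxTries int, conflict *regexp.Regexp, fn func() error) error {
	var err error
	for tries := 0; tries < maxTries; tries++ {
		if err = fn(); err == nil {
			return nil
		}
		if !conflict.MatchString(err.Error()) {
			return err
		}
		log.Printf("conflict - waiting before retry #%d", tries+1)
		time.Sleep(1 * time.Second)
	}
	return err
}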