func TestGetRunningDeploymentSet_otherCluster(t *testing.T) {
	//sous.Log.Vomit.SetFlags(sous.Log.Vomit.Flags() | log.Ltime)
	//sous.Log.Vomit.SetOutput(os.Stderr)
	//sous.Log.Vomit.Print("Starting stderr output")
	sous.Log.Debug.SetFlags(sous.Log.Debug.Flags() | log.Ltime)
	sous.Log.Debug.SetOutput(os.Stderr)
	sous.Log.Debug.Print("Starting stderr output")
	assert := assert.New(t)

	registerLabelledContainers()
	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()
	nc := docker.NewNameCache("", drc, newInMemoryDB("grds"))
	client := singularity.NewRectiAgent()
	d := singularity.NewDeployer(client)

	clusters := []string{"other-cluster"}

	ds, which := deploymentWithRepo(clusters, nc, assert, d, "github.com/opentable/docker-grafana")
	deps := ds.Snapshot()
	if assert.Equal(1, len(deps)) {
		grafana := deps[which]
		assert.Equal(SingularityURL, grafana.Cluster.BaseURL)
		assert.Regexp("^0\\.1", grafana.Resources["cpus"])    // XXX strings and floats...
		assert.Regexp("^100\\.", grafana.Resources["memory"]) // XXX strings and floats...
		assert.Equal("1", grafana.Resources["ports"])         // XXX strings and floats...
		assert.Equal(17, grafana.SourceID.Version.Patch)
		assert.Equal("91495f1b1630084e301241100ecf2e775f6b672c", grafana.SourceID.Version.Meta)
		assert.Equal(1, grafana.NumInstances)
		assert.Equal(sous.ManifestKindService, grafana.Kind)
	}

	ResetSingularity()
}
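// newInMemoryDB is used throughout these tests but defined elsewhere in the
// package. A minimal sketch of what it must provide, assuming the
// DBConfig/GetDatabase/InMemoryConnection helpers in sous's docker package
// and an import of database/sql; the exact names here are assumptions, shown
// for orientation only:
func newInMemoryDB(name string) *sql.DB {
	// Each test passes a distinct name ("grds", "missingimage", ...) so that
	// tests do not share name-cache state through a common in-memory DB.
	db, err := docker.GetDatabase(&docker.DBConfig{
		Driver:     "sqlite3_sous", // hypothetical driver name
		Connection: docker.InMemoryConnection(name),
	})
	if err != nil {
		panic(err)
	}
	return db
}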
func newDeployer(dryrun DryrunOption) sous.Deployer {
	// Eventually, based on configuration, we may make different decisions here.
	if dryrun == DryrunBoth || dryrun == DryrunScheduler {
		drc := sous.NewDummyRectificationClient()
		drc.SetLogger(log.New(os.Stdout, "rectify: ", 0))
		return singularity.NewDeployer(drc)
	}
	return singularity.NewDeployer(singularity.NewRectiAgent())
}
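// DryrunOption and its values are referenced by newDeployer above but defined
// elsewhere. A plausible shape, assuming a simple string enum; the literal
// values are illustrative assumptions, not the canonical definitions:
type DryrunOption string

const (
	DryrunBoth      DryrunOption = "both"      // dummy scheduler and dummy registry
	DryrunScheduler DryrunOption = "scheduler" // dummy scheduler, real registry
	DryrunNone      DryrunOption = "none"      // real Singularity deployer
)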
func TestMissingImage(t *testing.T) {
	assert := assert.New(t)

	clusterDefs := sous.Defs{
		Clusters: sous.Clusters{
			"test-cluster": &sous.Cluster{
				BaseURL: SingularityURL,
			},
		},
	}
	repoOne := "github.com/opentable/one"

	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()

	// easiest way to make sure that the manifest doesn't actually get registered
	dummyNc := docker.NewNameCache("", drc, newInMemoryDB("bitbucket"))

	stateOne := sous.State{
		Defs: clusterDefs,
		Manifests: sous.NewManifests(
			manifest(dummyNc, "opentable/one", "test-one", repoOne, "1.1.1"),
		),
	}

	// ****
	nc := docker.NewNameCache("", drc, newInMemoryDB("missingimage"))

	client := singularity.NewRectiAgent()
	deployer := singularity.NewDeployer(client)

	r := sous.NewResolver(deployer, nc, &sous.ResolveFilter{})

	deploymentsOne, err := stateOne.Deployments()
	if err != nil {
		t.Fatal(err)
	}

	err = r.Resolve(deploymentsOne, clusterDefs.Clusters)

	assert.Error(err)

	// ****
	time.Sleep(1 * time.Second)

	clusters := []string{"test-cluster"}

	_, which := deploymentWithRepo(clusters, nc, assert, deployer, repoOne)
	assert.Equal(which, none, "opentable/one was deployed")

	ResetSingularity()
}
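// manifest is the fixture builder these tests lean on; it is defined elsewhere
// in the package. From the call sites (and the dummyNc trick in TestMissingImage
// above), it must register the test image with the given name cache and return
// a one-cluster *sous.Manifest. A sketch under those assumptions; the field
// layout and the semv import (github.com/samsalisbury/semv) are plausible for
// sous but should be treated as illustrative, not canonical:
func manifest(nc sous.Registry, drepo, containerDir, sourceURL, version string) *sous.Manifest {
	// Build and push the test container, then prime the name cache with it.
	// (Registration details elided; they depend on the test harness.)
	return &sous.Manifest{
		Source: sous.SourceLocation{Repo: sourceURL},
		Owners: []string{"test@example.com"}, // hypothetical owner
		Kind:   sous.ManifestKindService,
		Deployments: sous.DeploySpecs{
			"test-cluster": sous.DeploySpec{
				DeployConfig: sous.DeployConfig{
					// Values mirror what the assertions in this file expect.
					Resources:    sous.Resources{"cpus": "0.1", "memory": "100", "ports": "1"},
					NumInstances: 1,
				},
				Version: semv.MustParse(version),
			},
		},
	}
}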
func TestGetRunningDeploymentSet_testCluster(t *testing.T) {
	//sous.Log.Vomit.SetFlags(sous.Log.Vomit.Flags() | log.Ltime)
	//sous.Log.Vomit.SetOutput(os.Stderr)
	//sous.Log.Vomit.Print("Starting stderr output")
	sous.Log.Debug.SetFlags(sous.Log.Debug.Flags() | log.Ltime)
	sous.Log.Debug.SetOutput(os.Stderr)
	sous.Log.Debug.Print("Starting stderr output")
	assert := assert.New(t)

	registerLabelledContainers()
	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()
	nc := docker.NewNameCache("", drc, newInMemoryDB("grds"))
	client := singularity.NewRectiAgent()
	d := singularity.NewDeployer(client)

	clusters := []string{"test-cluster"}

	// We run this test more than once to check that cache behaviour is
	// consistent whether the cache is already warmed up or not.
	const numberOfTestRuns = 2

	for i := 0; i < numberOfTestRuns; i++ {
		ds, which := deploymentWithRepo(clusters, nc, assert, d, "github.com/opentable/docker-grafana")
		deps := ds.Snapshot()
		if assert.Equal(3, len(deps)) {
			grafana := deps[which]
			cacheHitText := fmt.Sprintf("on cache hit %d", i+1)
			assert.Equal(SingularityURL, grafana.Cluster.BaseURL, cacheHitText)
			assert.Regexp("^0\\.1", grafana.Resources["cpus"], cacheHitText)    // XXX strings and floats...
			assert.Regexp("^100\\.", grafana.Resources["memory"], cacheHitText) // XXX strings and floats...
			assert.Equal("1", grafana.Resources["ports"], cacheHitText)         // XXX strings and floats...
			assert.Equal(17, grafana.SourceID.Version.Patch, cacheHitText)
			assert.Equal("91495f1b1630084e301241100ecf2e775f6b672c", grafana.SourceID.Version.Meta, cacheHitText)
			assert.Equal(1, grafana.NumInstances, cacheHitText)
			assert.Equal(sous.ManifestKindService, grafana.Kind, cacheHitText)
		}
	}

	ResetSingularity()
}
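// deploymentWithRepo, findRepo, and the none sentinel are shared helpers,
// defined elsewhere in the package. Their contract, reconstructed from the
// call sites in this file (a sketch: the DeployID/ManifestID field names and
// the Deployer.RunningDeployments/sous.NewDeployments signatures are
// assumptions): fetch the running deployments for the named clusters, then
// locate the one whose manifest points at the given source repo.
var none = sous.DeployID{}

func deploymentWithRepo(clusterNames []string, nc sous.Registry, assert *assert.Assertions, d sous.Deployer, repo string) (sous.Deployments, sous.DeployID) {
	clusters := make(sous.Clusters, len(clusterNames))
	for _, name := range clusterNames {
		clusters[name] = &sous.Cluster{BaseURL: SingularityURL}
	}
	deps, err := d.RunningDeployments(nc, clusters)
	if assert.NoError(err) {
		return deps, findRepo(deps, repo)
	}
	return sous.NewDeployments(), none
}

func findRepo(deps sous.Deployments, repo string) sous.DeployID {
	for id, dep := range deps.Snapshot() {
		if dep != nil && id.ManifestID.Source.Repo == repo {
			return id
		}
	}
	return none
}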
func TestResolve(t *testing.T) {
	assert := assert.New(t)
	//sous.Log.Vomit.SetOutput(os.Stderr)
	sous.Log.Debug.SetOutput(os.Stderr)

	ResetSingularity()
	defer ResetSingularity()

	clusterDefs := sous.Defs{
		Clusters: sous.Clusters{
			"test-cluster": &sous.Cluster{
				BaseURL: SingularityURL,
			},
		},
	}

	repoOne := "github.com/opentable/one"
	repoTwo := "github.com/opentable/two"
	repoThree := "github.com/opentable/three"

	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()

	db := newInMemoryDB("testresolve")
	nc := docker.NewNameCache("", drc, db)

	stateOneTwo := sous.State{
		Defs: clusterDefs,
		Manifests: sous.NewManifests(
			manifest(nc, "opentable/one", "test-one", repoOne, "1.1.1"),
			manifest(nc, "opentable/two", "test-two", repoTwo, "1.1.1"),
		),
	}
	deploymentsOneTwo, err := stateOneTwo.Deployments()
	if err != nil {
		t.Fatal(err)
	}

	stateTwoThree := sous.State{
		Defs: clusterDefs,
		Manifests: sous.NewManifests(
			manifest(nc, "opentable/two", "test-two", repoTwo, "1.1.1"),
			manifest(nc, "opentable/three", "test-three", repoThree, "1.1.1"),
		),
	}
	deploymentsTwoThree, err := stateTwoThree.Deployments()
	if err != nil {
		t.Fatal(err)
	}

	// ****
	log.Print("Resolving from nothing to one+two")

	client := singularity.NewRectiAgent()
	deployer := singularity.NewDeployer(client)

	r := sous.NewResolver(deployer, nc, &sous.ResolveFilter{})

	err = r.Resolve(deploymentsOneTwo, clusterDefs.Clusters)
	if err != nil {
		assert.Fail(err.Error())
	}

	// ****
	time.Sleep(3 * time.Second)

	clusters := []string{"test-cluster"}

	ds, which := deploymentWithRepo(clusters, nc, assert, deployer, repoOne)
	deps := ds.Snapshot()
	if assert.NotEqual(which, none, "opentable/one not successfully deployed") {
		one := deps[which]
		assert.Equal(1, one.NumInstances)
	}

	which = findRepo(ds, repoTwo)
	if assert.NotEqual(none, which, "opentable/two not successfully deployed") {
		two := deps[which]
		assert.Equal(1, two.NumInstances)
	}

	// ****
	log.Println("Resolving from one+two to two+three")

	conflictRE := regexp.MustCompile(`Pending deploy already in progress`)

	// XXX Let's hope this is a temporary solution to a testing issue.
	// The problem is laid out in DCOPS-7625.
	for tries := 0; tries < 3; tries++ {
		client := singularity.NewRectiAgent()
		deployer := singularity.NewDeployer(client)

		r := sous.NewResolver(deployer, nc, &sous.ResolveFilter{})

		// Assign to err (declared above) rather than shadowing it, so the
		// NoError assertion after the loop sees the final attempt's result.
		err = r.Resolve(deploymentsTwoThree, clusterDefs.Clusters)
		if err == nil {
			break
		}
		if !conflictRE.MatchString(err.Error()) {
			assert.FailNow(err.Error())
		}
		log.Printf("Singularity conflict - waiting for previous deploy to complete - try #%d", tries+1)
		time.Sleep(1 * time.Second)
	}

	if !assert.NoError(err) {
		assert.Fail(err.Error())
	}
	// ****

	ds, which = deploymentWithRepo(clusters, nc, assert, deployer, repoTwo)
	deps = ds.Snapshot()
	if assert.NotEqual(none, which, "opentable/two no longer deployed after resolve") {
		assert.Equal(1, deps[which].NumInstances)
	}

	which = findRepo(ds, repoThree)
	if assert.NotEqual(none, which, "opentable/three not successfully deployed") {
		assert.Equal(1, deps[which].NumInstances)

		if assert.Len(deps[which].DeployConfig.Volumes, 1) {
			assert.Equal("RO", string(deps[which].DeployConfig.Volumes[0].Mode))
		}
	}

	// We no longer expect any deletions; see deployer.RectifySingleDelete.
	//expectedInstances := 0
	expectedInstances := 1

	which = findRepo(ds, repoOne)
	if which != none {
		assert.Equal(expectedInstances, deps[which].NumInstances)
	}
}
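// The conflict-retry loop in TestResolve recurs whenever Singularity is still
// working through a pending deploy. One way to factor that pattern out (a
// hypothetical helper, not part of the sous codebase): retry an operation
// while its error matches a pattern that marks it as transient.
func retryWhileMatching(attempts int, wait time.Duration, transient *regexp.Regexp, op func() error) error {
	var err error
	for try := 0; try < attempts; try++ {
		err = op()
		if err == nil || !transient.MatchString(err.Error()) {
			return err
		}
		log.Printf("transient error - will retry (try #%d): %s", try+1, err)
		time.Sleep(wait)
	}
	return err
}

// TestResolve's retry loop could then read:
//
//	err = retryWhileMatching(3, time.Second, conflictRE, func() error {
//		deployer := singularity.NewDeployer(singularity.NewRectiAgent())
//		r := sous.NewResolver(deployer, nc, &sous.ResolveFilter{})
//		return r.Resolve(deploymentsTwoThree, clusterDefs.Clusters)
//	})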