func TestGetRunningDeploymentSet_otherCluster(t *testing.T) {
	//sous.Log.Vomit.SetFlags(sous.Log.Vomit.Flags() | log.Ltime)
	//sous.Log.Vomit.SetOutput(os.Stderr)
	//sous.Log.Vomit.Print("Starting stderr output")
	sous.Log.Debug.SetFlags(sous.Log.Debug.Flags() | log.Ltime)
	sous.Log.Debug.SetOutput(os.Stderr)
	sous.Log.Debug.Print("Starting stderr output")
	assert := assert.New(t)

	registerLabelledContainers()
	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()
	nc := docker.NewNameCache("", drc, newInMemoryDB("grds"))
	client := singularity.NewRectiAgent()
	d := singularity.NewDeployer(client)

	clusters := []string{"other-cluster"}

	ds, which := deploymentWithRepo(clusters, nc, assert, d, "github.com/opentable/docker-grafana")
	deps := ds.Snapshot()
	if assert.Equal(1, len(deps)) {
		grafana := deps[which]
		assert.Equal(SingularityURL, grafana.Cluster.BaseURL)
		assert.Regexp("^0\\.1", grafana.Resources["cpus"])    // XXX strings and floats...
		assert.Regexp("^100\\.", grafana.Resources["memory"]) // XXX strings and floats...
		assert.Equal("1", grafana.Resources["ports"])         // XXX strings and floats...
		assert.Equal(17, grafana.SourceID.Version.Patch)
		assert.Equal("91495f1b1630084e301241100ecf2e775f6b672c", grafana.SourceID.Version.Meta)
		assert.Equal(1, grafana.NumInstances)
		assert.Equal(sous.ManifestKindService, grafana.Kind)
	}

	ResetSingularity()
}
func main() {
	log.SetFlags(log.Flags() | log.Lshortfile)

	parsed, err := docopt.Parse(whitespace.CleanWS(`
		Usage: docker_labels [options] <image-name>

		Options:
			--insecure  allow insecure connections, e.g. to a self-signed registry
		`), nil, true, "", false)
	if err != nil {
		log.Fatal(err)
	}

	imageName := parsed["<image-name>"].(string)

	client := docker_registry.NewClient()
	// docopt always includes flag keys in its result map (with a false value
	// when the flag is absent), so test the boolean value rather than the
	// key's mere presence.
	if insecure, ok := parsed["--insecure"].(bool); ok && insecure {
		client.BecomeFoolishlyTrusting()
	}

	labels, err := client.LabelsForImageName(imageName)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("Found %d labels:\n", len(labels))
	for key, value := range labels {
		fmt.Printf("%s: %s\n", key, value)
	}
}
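// Example invocation (a sketch; the registry host, image name, and label
// keys below are illustrative placeholders, not output from a real registry):
//
//	$ docker_labels --insecure registry.example.com/some-image:latest
//	Found 2 labels:
//	<label-key>: <label-value>
//	<label-key>: <label-value>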
func TestNameCache(t *testing.T) {
	assert := assert.New(t)
	sous.Log.Debug.SetOutput(os.Stdout)

	ResetSingularity()
	defer ResetSingularity()

	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()

	db, err := docker.GetDatabase(&docker.DBConfig{
		Driver:     "sqlite3_sous",
		Connection: docker.InMemoryConnection("testnamecache"),
	})
	if err != nil {
		t.Fatal(err)
	}

	nc := docker.NewNameCache("", drc, db)

	repoOne := "https://github.com/opentable/one.git"
	manifest(nc, "opentable/one", "test-one", repoOne, "1.1.1")

	cn, err := nc.GetCanonicalName(BuildImageName("opentable/one", "1.1.1"))
	if err != nil {
		assert.FailNow(err.Error())
	}

	labels, err := drc.LabelsForImageName(cn)

	if assert.NoError(err) {
		assert.Equal("1.1.1", labels[docker.DockerVersionLabel])
	}
}
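// Several tests in this file call newInMemoryDB rather than spelling out the
// docker.GetDatabase boilerplate used in TestNameCache above. A minimal
// sketch of that helper, assuming docker.GetDatabase returns (*sql.DB, error)
// as it is used above (requires importing database/sql), and that a setup
// failure should simply panic:
func newInMemoryDB(name string) *sql.DB {
	db, err := docker.GetDatabase(&docker.DBConfig{
		Driver:     "sqlite3_sous",
		Connection: docker.InMemoryConnection(name),
	})
	if err != nil {
		panic(err)
	}
	return db
}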
func TestGetLabels(t *testing.T) {
	registerLabelledContainers()
	assert := assert.New(t)
	cl := docker_registry.NewClient()
	cl.BecomeFoolishlyTrusting()

	labels, err := cl.LabelsForImageName(imageName)

	assert.Nil(err)
	assert.Contains(labels, docker.DockerRepoLabel)

	ResetSingularity()
}
func TestMissingImage(t *testing.T) {
	assert := assert.New(t)

	clusterDefs := sous.Defs{
		Clusters: sous.Clusters{
			"test-cluster": &sous.Cluster{
				BaseURL: SingularityURL,
			},
		},
	}

	repoOne := "github.com/opentable/one"

	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()

	// easiest way to make sure that the manifest doesn't actually get registered
	dummyNc := docker.NewNameCache("", drc, newInMemoryDB("bitbucket"))

	stateOne := sous.State{
		Defs: clusterDefs,
		Manifests: sous.NewManifests(
			manifest(dummyNc, "opentable/one", "test-one", repoOne, "1.1.1"),
		),
	}

	// ****
	nc := docker.NewNameCache("", drc, newInMemoryDB("missingimage"))

	client := singularity.NewRectiAgent()
	deployer := singularity.NewDeployer(client)

	r := sous.NewResolver(deployer, nc, &sous.ResolveFilter{})

	deploymentsOne, err := stateOne.Deployments()
	if err != nil {
		t.Fatal(err)
	}

	err = r.Resolve(deploymentsOne, clusterDefs.Clusters)

	assert.Error(err)

	// ****
	time.Sleep(1 * time.Second)

	clusters := []string{"test-cluster"}

	_, which := deploymentWithRepo(clusters, nc, assert, deployer, repoOne)
	assert.Equal(which, none, "opentable/one was deployed")

	ResetSingularity()
}
func TestGetRunningDeploymentSet_testCluster(t *testing.T) {
	//sous.Log.Vomit.SetFlags(sous.Log.Vomit.Flags() | log.Ltime)
	//sous.Log.Vomit.SetOutput(os.Stderr)
	//sous.Log.Vomit.Print("Starting stderr output")
	sous.Log.Debug.SetFlags(sous.Log.Debug.Flags() | log.Ltime)
	sous.Log.Debug.SetOutput(os.Stderr)
	sous.Log.Debug.Print("Starting stderr output")
	assert := assert.New(t)

	registerLabelledContainers()
	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()
	nc := docker.NewNameCache("", drc, newInMemoryDB("grds"))
	client := singularity.NewRectiAgent()
	d := singularity.NewDeployer(client)

	clusters := []string{"test-cluster"}

	// We run this test more than once to check that cache behaviour is
	// consistent whether the cache is already warmed up or not.
	const numberOfTestRuns = 2
	for i := 0; i < numberOfTestRuns; i++ {
		ds, which := deploymentWithRepo(clusters, nc, assert, d, "github.com/opentable/docker-grafana")
		deps := ds.Snapshot()
		if assert.Equal(3, len(deps)) {
			grafana := deps[which]
			cacheHitText := fmt.Sprintf("on cache hit %d", i+1)
			assert.Equal(SingularityURL, grafana.Cluster.BaseURL, cacheHitText)
			assert.Regexp("^0\\.1", grafana.Resources["cpus"], cacheHitText)    // XXX strings and floats...
			assert.Regexp("^100\\.", grafana.Resources["memory"], cacheHitText) // XXX strings and floats...
			assert.Equal("1", grafana.Resources["ports"], cacheHitText)         // XXX strings and floats...
			assert.Equal(17, grafana.SourceID.Version.Patch, cacheHitText)
			assert.Equal("91495f1b1630084e301241100ecf2e775f6b672c", grafana.SourceID.Version.Meta, cacheHitText)
			assert.Equal(1, grafana.NumInstances, cacheHitText)
			assert.Equal(sous.ManifestKindService, grafana.Kind, cacheHitText)
		}
	}

	ResetSingularity()
}
func TestBuildDeployments(t *testing.T) {
	t.Skipf("Failing test on master preventing progress on other stories.")

	assert := assert.New(t)
	sous.Log.Debug.SetOutput(os.Stdout)

	ResetSingularity()
	defer ResetSingularity()

	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()

	db, err := docker.GetDatabase(&docker.DBConfig{
		Driver:     "sqlite3_sous",
		Connection: docker.InMemoryConnection("testresolve"),
	})
	if err != nil {
		panic(err)
	}

	appLocation := "testhelloreq"
	clusterNick := "tcluster"
	reqID := appLocation + clusterNick

	nc := docker.NewNameCache("", drc, db)

	singCl := sing.NewClient(SingularityURL)
	//singCl.Debug = true

	sr, err := singReqDep(
		SingularityURL,
		whitespace.CleanWS(`
		{
			"instances": 1,
			"id": "`+reqID+`",
			"requestType": "SERVICE",
			"owners": ["*****@*****.**", "*****@*****.**"]
		}`),
		whitespace.CleanWS(`
		{
			"deploy": {
				"id": "`+singularity.MakeDeployID(uuid.NewV4().String())+`",
				"requestId": "`+reqID+`",
				"resources": {
					"cpus": 0.1,
					"memoryMb": 32,
					"numPorts": 1
				},
				"containerInfo": {
					"type": "DOCKER",
					"docker": {
						"image": "`+BuildImageName("hello-server-labels", "latest")+`"
					},
					"volumes": [{"hostPath":"/tmp", "containerPath":"/tmp","mode":"RO"}]
				},
				"env": {
					"TEST": "yes"
				}
			}
		}`),
	)

	req := singularity.SingReq{
		SourceURL: SingularityURL,
		Sing:      singCl,
		ReqParent: sr,
	}

	if assert.NoError(err) {
		clusters := sous.Clusters{clusterNick: {BaseURL: SingularityURL}}
		dep, err := singularity.BuildDeployment(nc, clusters, req)

		if assert.NoError(err) {
			if assert.Len(dep.DeployConfig.Volumes, 1) {
				assert.Equal(dep.DeployConfig.Volumes[0].Host, "/tmp")
			}
			assert.Equal("github.com/docker/dockercloud-hello-world", dep.SourceID.Location.Repo)
		}
	}
}
func newDockerClient() LocalDockerClient {
	return LocalDockerClient{docker_registry.NewClient()}
}
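// Example usage (a sketch; it assumes LabelsForImageName is promoted from the
// embedded docker_registry client, as that method is called on the registry
// client elsewhere in this codebase):
//
//	cl := newDockerClient()
//	labels, err := cl.LabelsForImageName("opentable/one:1.1.1")
//	if err != nil {
//		// handle the lookup failure
//	}
//	_ = labels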
func TestResolve(t *testing.T) {
	assert := assert.New(t)
	//sous.Log.Vomit.SetOutput(os.Stderr)
	sous.Log.Debug.SetOutput(os.Stderr)

	ResetSingularity()
	defer ResetSingularity()

	clusterDefs := sous.Defs{
		Clusters: sous.Clusters{
			"test-cluster": &sous.Cluster{
				BaseURL: SingularityURL,
			},
		},
	}

	repoOne := "github.com/opentable/one"
	repoTwo := "github.com/opentable/two"
	repoThree := "github.com/opentable/three"

	drc := docker_registry.NewClient()
	drc.BecomeFoolishlyTrusting()

	db := newInMemoryDB("testresolve")

	nc := docker.NewNameCache("", drc, db)

	stateOneTwo := sous.State{
		Defs: clusterDefs,
		Manifests: sous.NewManifests(
			manifest(nc, "opentable/one", "test-one", repoOne, "1.1.1"),
			manifest(nc, "opentable/two", "test-two", repoTwo, "1.1.1"),
		),
	}
	deploymentsOneTwo, err := stateOneTwo.Deployments()
	if err != nil {
		t.Fatal(err)
	}

	stateTwoThree := sous.State{
		Defs: clusterDefs,
		Manifests: sous.NewManifests(
			manifest(nc, "opentable/two", "test-two", repoTwo, "1.1.1"),
			manifest(nc, "opentable/three", "test-three", repoThree, "1.1.1"),
		),
	}
	deploymentsTwoThree, err := stateTwoThree.Deployments()
	if err != nil {
		t.Fatal(err)
	}

	// ****
	log.Print("Resolving from nothing to one+two")

	client := singularity.NewRectiAgent()
	deployer := singularity.NewDeployer(client)

	r := sous.NewResolver(deployer, nc, &sous.ResolveFilter{})

	err = r.Resolve(deploymentsOneTwo, clusterDefs.Clusters)
	if err != nil {
		assert.Fail(err.Error())
	}

	// ****
	time.Sleep(3 * time.Second)

	clusters := []string{"test-cluster"}

	ds, which := deploymentWithRepo(clusters, nc, assert, deployer, repoOne)
	deps := ds.Snapshot()
	if assert.NotEqual(which, none, "opentable/one not successfully deployed") {
		one := deps[which]
		assert.Equal(1, one.NumInstances)
	}

	which = findRepo(ds, repoTwo)
	if assert.NotEqual(none, which, "opentable/two not successfully deployed") {
		two := deps[which]
		assert.Equal(1, two.NumInstances)
	}

	// ****
	log.Println("Resolving from one+two to two+three")

	conflictRE := regexp.MustCompile(`Pending deploy already in progress`)

	// XXX Let's hope this is a temporary solution to a testing issue.
	// The problem is laid out in DCOPS-7625.
	for tries := 0; tries < 3; tries++ {
		client := singularity.NewRectiAgent()
		deployer := singularity.NewDeployer(client)
		r := sous.NewResolver(deployer, nc, &sous.ResolveFilter{})

		// Assign to the enclosing err (rather than shadowing it with :=) so
		// that the NoError check after the loop sees the final outcome.
		err = r.Resolve(deploymentsTwoThree, clusterDefs.Clusters)
		if err == nil {
			break
		}
		if !conflictRE.MatchString(err.Error()) {
			assert.FailNow(err.Error())
		}
		log.Printf("Singularity conflict - waiting for previous deploy to complete - try #%d", tries+1)
		time.Sleep(1 * time.Second)
	}

	if !assert.NoError(err) {
		assert.Fail(err.Error())
	}

	// ****
	ds, which = deploymentWithRepo(clusters, nc, assert, deployer, repoTwo)
	deps = ds.Snapshot()
	if assert.NotEqual(none, which, "opentable/two no longer deployed after resolve") {
		assert.Equal(1, deps[which].NumInstances)
	}

	which = findRepo(ds, repoThree)
	if assert.NotEqual(none, which, "opentable/three not successfully deployed") {
		assert.Equal(1, deps[which].NumInstances)

		if assert.Len(deps[which].DeployConfig.Volumes, 1) {
			assert.Equal("RO", string(deps[which].DeployConfig.Volumes[0].Mode))
		}
	}

	// We no longer expect any deletions; See deployer.RectifySingleDelete.
	//expectedInstances := 0
	expectedInstances := 1

	which = findRepo(ds, repoOne)
	if which != none {
		assert.Equal(expectedInstances, deps[which].NumInstances)
	}
}