func TestReadContainerTransact(t *testing.T) {
	conn := db.New()
	conn.Transact(func(view db.Database) error {
		testReadContainerTransact(t, view)
		return nil
	})
}

func TestUpdateDBLabels(t *testing.T) {
	conn := db.New()
	conn.Transact(func(view db.Database) error {
		testUpdateDBLabels(t, view)
		return nil
	})
}

// Run starts the daemon.
func (dCmd *Daemon) Run() int {
	conn := db.New()
	go engine.Run(conn)
	go server.Run(conn, dCmd.host)
	cluster.Run(conn)
	return 0
}

func TestACLs(t *testing.T) {
	conn := db.New()

	code := `createDeployment({adminACL: ["1.2.3.4/32", "local"]})
		.deploy([
			new Machine({provider: "Amazon", role: "Master"}),
			new Machine({provider: "Amazon", role: "Worker"})
		]);`

	myIP = func() (string, error) {
		return "5.6.7.8", nil
	}
	updateStitch(t, conn, prog(t, code))
	acl, err := selectACL(conn)
	assert.Nil(t, err)
	assert.Equal(t, []string{"1.2.3.4/32", "5.6.7.8/32"}, acl.Admin)

	myIP = func() (string, error) {
		return "", errors.New("")
	}
	updateStitch(t, conn, prog(t, code))
	acl, err = selectACL(conn)
	assert.Nil(t, err)
	assert.Equal(t, []string{"1.2.3.4/32"}, acl.Admin)
}

func TestPlaceContainers(t *testing.T) {
	t.Parallel()

	conn := db.New()
	conn.Transact(func(view db.Database) error {
		m := view.InsertMinion()
		m.PrivateIP = "1"
		m.Role = db.Worker
		view.Commit(m)

		e := view.InsertEtcd()
		e.Leader = true
		view.Commit(e)

		c := view.InsertContainer()
		view.Commit(c)
		return nil
	})

	conn.Transact(func(view db.Database) error {
		placeContainers(view)
		return nil
	})

	conn.Transact(func(view db.Database) error {
		dbcs := view.SelectFromContainer(nil)
		assert.Len(t, dbcs, 1)
		assert.Equal(t, "1", dbcs[0].Minion)
		return nil
	})
}

func TestDeploy(t *testing.T) {
	conn := db.New()
	s := server{dbConn: conn}

	createMachineDeployment := `{"Machines":[
		{"Provider":"Amazon", "Role":"Master", "Size":"m4.large"},
		{"Provider":"Amazon", "Role":"Worker", "Size":"m4.large"}]}`

	_, err := s.Deploy(context.Background(),
		&pb.DeployRequest{Deployment: createMachineDeployment})
	assert.NoError(t, err)

	var spec string
	conn.Transact(func(view db.Database) error {
		clst, err := view.GetCluster()
		assert.NoError(t, err)
		spec = clst.Spec
		return nil
	})

	exp, err := stitch.FromJSON(createMachineDeployment)
	assert.NoError(t, err)

	actual, err := stitch.FromJSON(spec)
	assert.NoError(t, err)

	assert.Equal(t, exp, actual)
}

func main() {
	// XXX Uncomment the following line to run the profiler.
	//runProfiler(5 * time.Minute)

	log.Info("Minion Start")

	log.SetFormatter(util.Formatter{})

	conn := db.New()
	dk := docker.New("unix:///var/run/docker.sock")

	go minionServerRun(conn)
	go supervisor.Run(conn, dk)
	go scheduler.Run(conn)
	go network.Run(conn, dk)
	go etcd.Run(conn)

	for range conn.Trigger(db.MinionTable).C {
		conn.Transact(func(view db.Database) error {
			minion, err := view.MinionSelf()
			if err != nil {
				return err
			}
			updatePolicy(view, minion.Role, minion.Spec)
			return nil
		})
	}
}

// initTest creates a supervisor backed by an in-memory database and a mock
// Docker client, inserts a self minion and an etcd row, runs the system
// containers once, and stubs out execRun so executed commands are recorded.
func initTest() *testCtx {
	conn := db.New()
	md, dk := docker.NewMock()
	ctx := testCtx{supervisor{}, fakeDocker{dk, md}, nil, conn,
		conn.Trigger(db.MinionTable, db.EtcdTable)}
	ctx.sv.conn = ctx.conn
	ctx.sv.dk = ctx.fd.Client

	ctx.conn.Transact(func(view db.Database) error {
		m := view.InsertMinion()
		m.Self = true
		view.Commit(m)
		e := view.InsertEtcd()
		view.Commit(e)
		return nil
	})
	ctx.sv.runSystemOnce()

	execRun = func(name string, args ...string) error {
		ctx.execs = append(ctx.execs, append([]string{name}, args...))
		return nil
	}

	return &ctx
}

func TestSort(t *testing.T) {
	pre := `var baseMachine = new Machine({provider: "Amazon", size: "m4.large"});`
	conn := db.New()

	updateStitch(t, conn, prog(t, pre+`
		deployment.deploy(baseMachine.asMaster().replicate(3));
		deployment.deploy(baseMachine.asWorker().replicate(1));`))

	conn.Transact(func(view db.Database) error {
		machines := view.SelectFromMachine(func(m db.Machine) bool {
			return m.Role == db.Master
		})
		assert.Equal(t, 3, len(machines))

		machines[2].PublicIP = "a"
		machines[2].PrivateIP = "b"
		view.Commit(machines[2])

		machines[1].PrivateIP = "c"
		view.Commit(machines[1])

		return nil
	})

	updateStitch(t, conn, prog(t, pre+`
		deployment.deploy(baseMachine.asMaster().replicate(2));
		deployment.deploy(baseMachine.asWorker().replicate(1));`))

	conn.Transact(func(view db.Database) error {
		machines := view.SelectFromMachine(func(m db.Machine) bool {
			return m.Role == db.Master
		})
		assert.Equal(t, 2, len(machines))

		for _, m := range machines {
			assert.False(t, m.PublicIP == "" && m.PrivateIP == "")
		}

		return nil
	})

	updateStitch(t, conn, prog(t, pre+`
		deployment.deploy(baseMachine.asMaster().replicate(1));
		deployment.deploy(baseMachine.asWorker().replicate(1));`))

	conn.Transact(func(view db.Database) error {
		machines := view.SelectFromMachine(func(m db.Machine) bool {
			return m.Role == db.Master
		})
		assert.Equal(t, 1, len(machines))

		for _, m := range machines {
			assert.False(t, m.PublicIP == "" && m.PrivateIP == "")
		}

		return nil
	})
}

// startTestWithRole stubs out newClient so every fake minion client reports
// the given role, and returns a fresh database connection.
func startTestWithRole(role pb.MinionConfig_Role) db.Conn {
	clientInst := &clients{make(map[string]*fakeClient), 0}
	newClient = func(ip string) (client, error) {
		fc := &fakeClient{clientInst, ip, pb.MinionConfig{Role: role}}
		clientInst.clients[ip] = fc
		clientInst.newCalls++
		return fc, nil
	}
	return db.New()
}

func TestBadDeployment(t *testing.T) {
	conn := db.New()
	s := server{dbConn: conn}

	badDeployment := `{`

	_, err := s.Deploy(context.Background(),
		&pb.DeployRequest{Deployment: badDeployment})
	assert.EqualError(t, err, "unexpected end of JSON input")
}

func TestPanicBadProvider(t *testing.T) {
	temp := allProviders
	defer func() {
		r := recover()
		assert.NotNil(t, r)
		allProviders = temp
	}()
	allProviders = []db.Provider{FakeAmazon}
	conn := db.New()
	newCluster(conn, "test")
}

func main() {
	/* XXX: GRPC spews a lot of useless log messages, so we tell it to eat its
	 * logs. Once we have more sophisticated logging support, we should enable
	 * the log messages when in debug mode. */
	grpclog.SetLogger(l_mod.New(ioutil.Discard, "", 0))

	log.SetFormatter(util.Formatter{})

	flag.Usage = func() {
		fmt.Println("Usage: quilt [inspect <stitch> | run <stitch>" +
			" | stop <namespace> | get <import_path>]" +
			" [-log-level=<level> | -l=<level>]")
		fmt.Println("\nWhen provided a stitch, quilt takes responsibility\n" +
			"for deploying it as specified. Alternatively, quilt may be\n" +
			"instructed to stop all deployments in a given namespace,\n" +
			"or the default namespace if none is provided.\n")
		flag.PrintDefaults()
		fmt.Println(" Valid logger levels are:\n" +
			"    debug, info, warn, error, fatal or panic.")
	}

	var logLevel = flag.String("log-level", "info", "level to set logger to")
	flag.StringVar(logLevel, "l", "info", "level to set logger to")
	flag.Parse()

	level, err := parseLogLevel(*logLevel)
	if err != nil {
		fmt.Println(err)
		usage()
	}
	log.SetLevel(level)

	conn := db.New()

	if len(flag.Args()) != 2 {
		usage()
	}

	switch flag.Arg(0) {
	case "run":
		go configLoop(conn, flag.Arg(1))
	case "stop":
		stop(conn, flag.Arg(1))
	case "get":
		getSpec(flag.Arg(1))
	case "inspect":
		inspect.Main(flag.Args())
		return
	default:
		usage()
	}

	cluster.Run(conn)
}

func TestPanicBadProvider(t *testing.T) {
	temp := allProviders
	defer func() {
		if r := recover(); r == nil {
			t.Error("newCluster did not panic on bad provider")
		}
		allProviders = temp
	}()
	allProviders = []db.Provider{FakeAmazon}
	conn := db.New()
	newCluster(conn, "test")
}

// startTestWithRole creates a foreman whose newClient constructor hands out
// fake minion clients that report the given role.
func startTestWithRole(role pb.MinionConfig_Role) foreman {
	fm := createForeman(db.New())
	clientInst := &clients{make(map[string]*fakeClient), 0}
	fm.newClient = func(ip string) (client, error) {
		fc := &fakeClient{clientInst, ip, pb.MinionConfig{Role: role},
			pb.EtcdMembers{}}
		clientInst.clients[ip] = fc
		clientInst.newCalls++
		return fc, nil
	}
	return fm
}

// newTestCluster returns a cluster wired to fake Amazon and Vagrant providers,
// with sleeping stubbed out so tests run without delay.
func newTestCluster() cluster {
	conn := db.New()
	clst := cluster{
		conn:      conn,
		providers: make(map[db.Provider]provider.Provider),
	}
	clst.providers[FakeAmazon] = newFakeProvider(amazonCloudConfig)
	clst.providers[FakeVagrant] = newFakeProvider(vagrantCloudConfig)
	sleep = func(t time.Duration) {}
	return clst
}

func TestGetMinionConfig(t *testing.T) {
	t.Parallel()

	s := server{db.New()}

	// Should set Role to None if no config.
	cfg, err := s.GetMinionConfig(nil, &pb.Request{})
	assert.NoError(t, err)
	assert.Equal(t, pb.MinionConfig{Role: pb.MinionConfig_NONE}, *cfg)

	// Should only return config for "self".
	s.Conn.Transact(func(view db.Database) error {
		m := view.InsertMinion()
		m.Self = false
		m.Spec = "spec"
		m.Role = db.Master
		m.PrivateIP = "priv"
		m.Provider = "provider"
		m.Size = "size"
		m.Region = "region"
		m.AuthorizedKeys = "key1\nkey2"
		view.Commit(m)
		return nil
	})

	cfg, err = s.GetMinionConfig(nil, &pb.Request{})
	assert.NoError(t, err)
	assert.Equal(t, pb.MinionConfig{Role: pb.MinionConfig_NONE}, *cfg)

	// Test returning a full config.
	s.Conn.Transact(func(view db.Database) error {
		m := view.SelectFromMinion(nil)[0]
		m.Self = true
		view.Commit(m)

		etcd := view.InsertEtcd()
		etcd.EtcdIPs = []string{"etcd1", "etcd2"}
		view.Commit(etcd)
		return nil
	})

	cfg, err = s.GetMinionConfig(nil, &pb.Request{})
	assert.NoError(t, err)
	assert.Equal(t, pb.MinionConfig{
		Role:           pb.MinionConfig_MASTER,
		PrivateIP:      "priv",
		Spec:           "spec",
		Provider:       "provider",
		Size:           "size",
		Region:         "region",
		EtcdMembers:    []string{"etcd1", "etcd2"},
		AuthorizedKeys: []string{"key1", "key2"},
	}, *cfg)
}

// startTest returns a foreman whose newClient constructor hands out fake
// minion clients, along with the tracker that counts how many were created.
func startTest() (foreman, *clients) {
	fm := createForeman(db.New())
	clients := &clients{make(map[string]*fakeClient), 0}
	fm.newClient = func(ip string) (client, error) {
		if fc, ok := clients.clients[ip]; ok {
			return fc, nil
		}
		fc := &fakeClient{clients, ip, pb.MinionConfig{}, pb.EtcdMembers{}}
		clients.clients[ip] = fc
		clients.newCalls++
		return fc, nil
	}
	return fm, clients
}

// startTest resets the minion map, stubs out newClient to hand out fake
// minion clients, and returns a fresh database connection along with the
// tracker that counts how many clients were created.
func startTest() (db.Conn, *clients) {
	conn := db.New()
	minions = map[string]*minion{}
	clients := &clients{make(map[string]*fakeClient), 0}
	newClient = func(ip string) (client, error) {
		if fc, ok := clients.clients[ip]; ok {
			return fc, nil
		}
		fc := &fakeClient{clients, ip, pb.MinionConfig{}}
		clients.clients[ip] = fc
		clients.newCalls++
		return fc, nil
	}
	return conn, clients
}

func TestSyncKeys(t *testing.T) {
	tests := []keyTest{
		{
			dbKeys:     "key1\nkey2",
			expKeyFile: "key1\nkey2",
		},
		{
			dbKeys:     "key1\nkey2",
			keyFile:    "key1",
			expKeyFile: "key1\nkey2",
		},
		{
			dbKeys:     "key1\nkey2",
			keyFile:    "key1\nkey2",
			expKeyFile: "key1\nkey2",
		},
		{
			keyFile:    "key1\nkey2",
			expKeyFile: "",
		},
	}
	for _, test := range tests {
		util.AppFs = afero.NewMemMapFs()
		if test.keyFile != "" {
			err := util.WriteFile(
				authorizedKeysFile, []byte(test.keyFile), 0644)
			assert.NoError(t, err)
		}

		conn := db.New()
		conn.Transact(func(view db.Database) error {
			m := view.InsertMinion()
			m.Self = true
			m.AuthorizedKeys = test.dbKeys
			view.Commit(m)
			return nil
		})

		err := runOnce(conn)
		assert.NoError(t, err)

		actual, err := util.ReadFile(authorizedKeysFile)
		assert.NoError(t, err)
		assert.Equal(t, test.expKeyFile, actual)
	}
}

func TestConnectionTxn(t *testing.T) {
	conn := db.New()
	trigg := conn.Trigger(db.ConnectionTable).C

	pre := `var a = new Service("a", [new Container("alpine")]);
		var b = new Service("b", [new Container("alpine")]);
		var c = new Service("c", [new Container("alpine")]);
		deployment.deploy([a, b, c]);`

	spec := ""
	testConnectionTxn(t, conn, spec)
	assert.False(t, fired(trigg))

	spec = pre + `a.connect(80, a);`
	testConnectionTxn(t, conn, spec)
	assert.True(t, fired(trigg))

	testConnectionTxn(t, conn, spec)
	assert.False(t, fired(trigg))

	spec = pre + `a.connect(90, a);`
	testConnectionTxn(t, conn, spec)
	assert.True(t, fired(trigg))

	testConnectionTxn(t, conn, spec)
	assert.False(t, fired(trigg))

	spec = pre + `b.connect(90, a);
		b.connect(90, c);
		b.connect(100, b);
		c.connect(101, a);`
	testConnectionTxn(t, conn, spec)
	assert.True(t, fired(trigg))

	testConnectionTxn(t, conn, spec)
	assert.False(t, fired(trigg))

	spec = pre
	testConnectionTxn(t, conn, spec)
	assert.True(t, fired(trigg))

	testConnectionTxn(t, conn, spec)
	assert.False(t, fired(trigg))
}

func TestSetMinionConfig(t *testing.T) {
	t.Parallel()

	s := server{db.New()}
	cfg := pb.MinionConfig{
		Role:           pb.MinionConfig_MASTER,
		PrivateIP:      "priv",
		Spec:           "spec",
		Provider:       "provider",
		Size:           "size",
		Region:         "region",
		EtcdMembers:    []string{"etcd1", "etcd2"},
		AuthorizedKeys: []string{"key1", "key2"},
	}
	expMinion := db.Minion{
		Self:           true,
		Spec:           "spec",
		Role:           db.Master,
		PrivateIP:      "priv",
		Provider:       "provider",
		Size:           "size",
		Region:         "region",
		AuthorizedKeys: "key1\nkey2",
	}
	_, err := s.SetMinionConfig(nil, &cfg)
	assert.NoError(t, err)
	checkMinionEquals(t, s.Conn, expMinion)
	checkEtcdEquals(t, s.Conn, db.Etcd{
		EtcdIPs: []string{"etcd1", "etcd2"},
	})

	// Update a field.
	cfg.Spec = "new"
	expMinion.Spec = "new"
	cfg.EtcdMembers = []string{"etcd3"}
	_, err = s.SetMinionConfig(nil, &cfg)
	assert.NoError(t, err)
	checkMinionEquals(t, s.Conn, expMinion)
	checkEtcdEquals(t, s.Conn, db.Etcd{
		EtcdIPs: []string{"etcd3"},
	})
}

func TestUpdateLeaderDBC(t *testing.T) {
	conn := db.New()
	conn.Transact(func(view db.Database) error {
		dbc := view.InsertContainer()
		dbc.StitchID = 1
		view.Commit(dbc)

		updateLeaderDBC(view, view.SelectFromContainer(nil), storeData{
			containers: []storeContainer{{StitchID: 1}},
		}, map[string]string{"1": "foo"})

		dbcs := view.SelectFromContainer(nil)
		if len(dbcs) != 1 || dbcs[0].StitchID != 1 ||
			dbcs[0].IP != "foo" || dbcs[0].Mac != "" {
			t.Error(spew.Sprintf("Unexpected dbc: %v", dbcs[0]))
		}
		return nil
	})
}

func TestContainerResponse(t *testing.T) {
	t.Parallel()

	conn := db.New()
	conn.Transact(func(view db.Database) error {
		c := view.InsertContainer()
		c.DockerID = "docker-id"
		c.Image = "image"
		c.Command = []string{"cmd", "arg"}
		c.Labels = []string{"labelA", "labelB"}
		view.Commit(c)
		return nil
	})

	exp := `[{"ID":1,"Pid":0,"IP":"","Mac":"","Minion":"",` +
		`"EndpointID":"","StitchID":0,"DockerID":"docker-id","Image":"image",` +
		`"Command":["cmd","arg"],"Labels":["labelA","labelB"],"Env":null}]`
	checkQuery(t, server{conn}, db.ContainerTable, exp)
}

// initTest creates a supervisor backed by an in-memory database and a fake
// Docker client, inserts a self minion and an etcd row, and runs the system
// containers once.
func initTest() testCtx {
	conn := db.New()
	ctx := testCtx{supervisor{},
		fakeDocker{make(map[string][]string), make(map[string][]string),
			make(map[string]bool)},
		conn, conn.Trigger(db.MinionTable, db.EtcdTable)}
	ctx.sv.conn = ctx.conn
	ctx.sv.dk = ctx.fd

	ctx.conn.Transact(func(view db.Database) error {
		m := view.InsertMinion()
		m.Self = true
		view.Commit(m)
		e := view.InsertEtcd()
		view.Commit(e)
		return nil
	})
	ctx.sv.runSystemOnce()

	return ctx
}

func TestMachineResponse(t *testing.T) {
	t.Parallel()

	conn := db.New()
	conn.Transact(func(view db.Database) error {
		m := view.InsertMachine()
		m.Role = db.Master
		m.Provider = db.Amazon
		m.Size = "size"
		m.PublicIP = "8.8.8.8"
		m.PrivateIP = "9.9.9.9"
		view.Commit(m)
		return nil
	})

	exp := `[{"ID":1,"Role":"Master","Provider":"Amazon","Region":"",` +
		`"Size":"size","DiskSize":0,"SSHKeys":null,"CloudID":"",` +
		`"PublicIP":"8.8.8.8","PrivateIP":"9.9.9.9","Connected":false}]`
	checkQuery(t, server{conn}, db.MachineTable, exp)
}

func TestVagrantDeployment(t *testing.T) {
	conn := db.New()
	s := server{dbConn: conn}

	vagrantDeployment := `{"Machines":[
		{"Provider":"Vagrant", "Role":"Master", "Size":"m4.large"},
		{"Provider":"Vagrant", "Role":"Worker", "Size":"m4.large"}]}`
	vagrantErrMsg := "The Vagrant provider is in development." +
		" The stitch will continue to run, but" +
		" probably won't work correctly."

	_, err := s.Deploy(context.Background(),
		&pb.DeployRequest{Deployment: vagrantDeployment})
	assert.EqualError(t, err, vagrantErrMsg)

	var spec string
	conn.Transact(func(view db.Database) error {
		clst, err := view.GetCluster()
		assert.NoError(t, err)
		spec = clst.Spec
		return nil
	})

	exp, err := stitch.FromJSON(vagrantDeployment)
	assert.NoError(t, err)

	actual, err := stitch.FromJSON(spec)
	assert.NoError(t, err)

	assert.Equal(t, exp, actual)
}

func TestRunWorker(t *testing.T) {
	t.Parallel()

	md, dk := docker.NewMock()
	conn := db.New()
	conn.Transact(func(view db.Database) error {
		container := view.InsertContainer()
		container.Image = "Image"
		container.Minion = "1.2.3.4"
		view.Commit(container)

		m := view.InsertMinion()
		m.Self = true
		m.PrivateIP = "1.2.3.4"
		view.Commit(m)
		return nil
	})

	// Wrong Minion IP, should do nothing.
	runWorker(conn, dk, "1.2.3.5", *subnet)
	dkcs, err := dk.List(nil)
	assert.NoError(t, err)
	assert.Len(t, dkcs, 0)

	// Run with a list error, should do nothing.
	md.ListError = true
	runWorker(conn, dk, "1.2.3.4", *subnet)
	md.ListError = false
	dkcs, err = dk.List(nil)
	assert.NoError(t, err)
	assert.Len(t, dkcs, 0)

	runWorker(conn, dk, "1.2.3.4", *subnet)
	dkcs, err = dk.List(nil)
	assert.NoError(t, err)
	assert.Len(t, dkcs, 1)
	assert.Equal(t, "Image", dkcs[0].Image)
}

func TestSyncKeysError(t *testing.T) {
	util.AppFs = afero.NewMemMapFs()

	conn := db.New()
	err := runOnce(conn)
	assert.EqualError(t, err, "no self minion")

	fs := afero.NewMemMapFs()
	util.AppFs = afero.NewReadOnlyFs(fs)
	conn.Transact(func(view db.Database) error {
		m := view.InsertMinion()
		m.Self = true
		m.AuthorizedKeys = "keys"
		view.Commit(m)
		return nil
	})

	err = runOnce(conn)
	assert.EqualError(t, err, "open /home/quilt/.ssh/authorized_keys: "+
		"file does not exist")

	fs.Create(authorizedKeysFile)
	err = runOnce(conn)
	assert.EqualError(t, err, "operation not permitted")
}

// Run blocks executing the minion.
func Run() {
	// XXX Uncomment the following line to run the profiler.
	//runProfiler(5 * time.Minute)

	log.Info("Minion Start")

	conn := db.New()
	dk := docker.New("unix:///var/run/docker.sock")

	// Not in a goroutine; we want the plugin to start before the scheduler.
	plugin.Run()

	go minionServerRun(conn)
	go supervisor.Run(conn, dk)
	go scheduler.Run(conn, dk)
	go network.Run(conn, dk)
	go etcd.Run(conn)
	go syncAuthorizedKeys(conn)
	go apiServer.Run(conn, fmt.Sprintf("tcp://0.0.0.0:%d", api.DefaultRemotePort))

	loopLog := util.NewEventTimer("Minion-Update")
	for range conn.Trigger(db.MinionTable).C {
		loopLog.LogStart()
		conn.Transact(func(view db.Database) error {
			minion, err := view.MinionSelf()
			if err != nil {
				return err
			}
			updatePolicy(view, minion.Role, minion.Spec)
			return nil
		})
		loopLog.LogEnd()
	}
}