// toDBMachine converts machines specified in the Stitch into db.Machines that can
// be compared against what's already in the db.
// Specifically, it sets the role of the db.Machine, the size (which may depend
// on RAM and CPU constraints), and the provider.
// Additionally, it skips machines with invalid roles, sizes, or providers.
func toDBMachine(machines []stitch.Machine, maxPrice float64) []db.Machine {
	var hasMaster, hasWorker bool
	var dbMachines []db.Machine
	for _, stitchm := range machines {
		var m db.Machine

		role, err := db.ParseRole(stitchm.Role)
		if err != nil {
			log.WithError(err).Error("Error parsing role.")
			continue
		}
		m.Role = role

		hasMaster = hasMaster || role == db.Master
		hasWorker = hasWorker || role == db.Worker

		p, err := db.ParseProvider(stitchm.Provider)
		if err != nil {
			log.WithError(err).Error("Error parsing provider.")
			continue
		}
		m.Provider = p

		m.Size = stitchm.Size
		if m.Size == "" {
			providerInst := provider.New(p)
			m.Size = providerInst.ChooseSize(
				stitchm.RAM, stitchm.CPU, maxPrice)
			if m.Size == "" {
				log.Errorf("No valid size for %v, skipping.", m)
				continue
			}
		}

		m.DiskSize = stitchm.DiskSize
		if m.DiskSize == 0 {
			m.DiskSize = defaultDiskSize
		}

		m.SSHKeys = stitchm.SSHKeys
		m.Region = stitchm.Region
		dbMachines = append(dbMachines, provider.DefaultRegion(m))
	}

	// A valid deployment needs both a Master and a Worker.
	if !hasMaster && hasWorker {
		log.Warning("A Worker was specified but no Masters.")
		return nil
	} else if hasMaster && !hasWorker {
		log.Warning("A Master was specified but no Workers.")
		return nil
	}

	return dbMachines
}
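A minimal usage sketch of toDBMachine (hypothetical field values; the stitch.Machine fields are the ones the function reads above, and maxPrice is the caller's hourly price ceiling):

	// Hypothetical input: one Master with an explicit size, one Worker whose
	// size is chosen from its RAM/CPU constraints against maxPrice.
	machines := []stitch.Machine{
		{Role: "Master", Provider: "Amazon", Size: "m4.large"},
		{Role: "Worker", Provider: "Amazon"},
	}
	dbMachines := toDBMachine(machines, 1.0)
	// Each returned db.Machine has Role, Provider, Size, DiskSize, and a
	// default Region (via provider.DefaultRegion) filled in; entries with
	// invalid roles, providers, or sizes are skipped.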
func TestDefaultRegion(t *testing.T) {
	exp := "foo"
	m := db.Machine{Provider: "Amazon", Region: exp}
	m = DefaultRegion(m)
	if m.Region != exp {
		t.Errorf("expected %s, found %s", exp, m.Region)
	}

	m.Region = ""
	m = DefaultRegion(m)
	exp = "us-west-1"
	if m.Region != exp {
		t.Errorf("expected %s, found %s", exp, m.Region)
	}

	m.Region = ""
	m.Provider = "Google"
	exp = "us-east1-b"
	m = DefaultRegion(m)
	if m.Region != exp {
		t.Errorf("expected %s, found %s", exp, m.Region)
	}

	m.Region = ""
	m.Provider = "Azure"
	exp = "Central US"
	m = DefaultRegion(m)
	if m.Region != exp {
		t.Errorf("expected %s, found %s", exp, m.Region)
	}

	m.Region = ""
	m.Provider = "Vagrant"
	exp = ""
	m = DefaultRegion(m)
	if m.Region != exp {
		t.Errorf("expected %s, found %s", exp, m.Region)
	}

	m.Region = ""
	m.Provider = "Panic"
	defer func() {
		if r := recover(); r == nil {
			t.Error("Expected panic")
		}
	}()
	m = DefaultRegion(m)
}
// DefaultRegion populates `m.Region` for the provided db.Machine if one isn't
// specified. This is intended to allow users to omit the cloud provider region when
// they don't particularly care where a system is placed.
func DefaultRegion(m db.Machine) db.Machine {
	if m.Region != "" {
		return m
	}

	switch m.Provider {
	case db.Amazon:
		m.Region = amazon.DefaultRegion
	case db.Google:
		m.Region = google.DefaultRegion
	case db.Vagrant:
	default:
		panic(fmt.Sprintf("Unknown Cloud Provider: %s", m.Provider))
	}

	return m
}
// DefaultRegion populates `m.Region` for the provided db.Machine if one isn't
// specified. This is intended to allow users to omit the cloud provider region when
// they don't particularly care where a system is placed.
func DefaultRegion(m db.Machine) db.Machine {
	if m.Region != "" {
		return m
	}

	region := ""
	switch m.Provider {
	case "Amazon":
		region = "us-west-1"
	case "Google":
		region = "us-east1-b"
	case "Azure":
		region = "Central US"
	case "Vagrant":
	default:
		panic(fmt.Sprintf("Unknown Cloud Provider: %s", m.Provider))
	}
	m.Region = region

	return m
}
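For reference, a brief sketch of DefaultRegion's behavior, using only the provider/region pairs from the switch above:

	m := db.Machine{Provider: "Amazon"}
	m = DefaultRegion(m)
	// m.Region is now "us-west-1". A machine with an explicit Region is
	// returned unchanged, Vagrant machines keep an empty Region, and an
	// unrecognized provider panics.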
func TestConfigConsistency(t *testing.T) {
	masterRole := db.RoleToPB(db.Master)
	workerRole := db.RoleToPB(db.Worker)

	fm, _ := startTest()
	var master, worker db.Machine
	fm.conn.Transact(func(view db.Database) error {
		master = view.InsertMachine()
		master.PublicIP = "1.1.1.1"
		master.PrivateIP = master.PublicIP
		master.CloudID = "ID1"
		view.Commit(master)

		worker = view.InsertMachine()
		worker.PublicIP = "2.2.2.2"
		worker.PrivateIP = worker.PublicIP
		worker.CloudID = "ID2"
		view.Commit(worker)

		return nil
	})

	fm.init()

	fm.conn.Transact(func(view db.Database) error {
		master.Role = db.Master
		worker.Role = db.Worker
		view.Commit(master)
		view.Commit(worker)
		return nil
	})

	fm.runOnce()

	checkRoles := func(fore foreman) {
		r := fore.minions["1.1.1.1"].client.(*fakeClient).mc.Role
		if r != masterRole {
			t.Errorf("Master has role %v, should be %v", r, masterRole)
		}

		r = fore.minions["2.2.2.2"].client.(*fakeClient).mc.Role
		if r != workerRole {
			t.Errorf("Worker has role %v, should be %v", r, workerRole)
		}
	}
	checkRoles(fm)

	fm.stop()

	newfm, clients := startTest()
	newfm.conn = fm.conn

	// Insert the clients into the client list to simulate fetching
	// from the remote cluster.
	clients.clients["1.1.1.1"] = &fakeClient{clients, "1.1.1.1",
		pb.MinionConfig{Role: masterRole}, pb.EtcdMembers{}}
	clients.clients["2.2.2.2"] = &fakeClient{clients, "2.2.2.2",
		pb.MinionConfig{Role: workerRole}, pb.EtcdMembers{}}

	newfm.init()
	newfm.runOnce()
	checkRoles(newfm)

	// After many runs, the roles should never change.
	for i := 0; i < 25; i++ {
		newfm.runOnce()
	}
	checkRoles(newfm)

	// Ensure that the DB machines have the correct roles as well.
	newfm.conn.Transact(func(view db.Database) error {
		machines := view.SelectFromMachine(nil)
		for _, m := range machines {
			if m.PublicIP == "1.1.1.1" && m.Role != db.Master {
				t.Errorf("db Master had role %v, expected %v",
					m.Role, db.Master)
			}
			if m.PublicIP == "2.2.2.2" && m.Role != db.Worker {
				t.Errorf("db Worker had role %v, expected %v",
					m.Role, db.Worker)
			}
		}
		return nil
	})
}
func TestConfigConsistency(t *testing.T) {
	masterRole := db.RoleToPB(db.Master)
	workerRole := db.RoleToPB(db.Worker)

	conn, clients := startTest()
	var master, worker db.Machine
	conn.Transact(func(view db.Database) error {
		master = view.InsertMachine()
		master.PublicIP = "1.1.1.1"
		master.PrivateIP = master.PublicIP
		master.CloudID = "ID1"
		view.Commit(master)

		worker = view.InsertMachine()
		worker.PublicIP = "2.2.2.2"
		worker.PrivateIP = worker.PublicIP
		worker.CloudID = "ID2"
		view.Commit(worker)

		return nil
	})

	Init(conn)

	conn.Transact(func(view db.Database) error {
		master.Role = db.Master
		worker.Role = db.Worker
		view.Commit(master)
		view.Commit(worker)
		return nil
	})

	RunOnce(conn)

	checkRoles := func() {
		r := minions["1.1.1.1"].client.(*fakeClient).mc.Role
		assert.Equal(t, masterRole, r)

		r = minions["2.2.2.2"].client.(*fakeClient).mc.Role
		assert.Equal(t, workerRole, r)
	}
	checkRoles()

	minions = map[string]*minion{}

	// Insert the clients into the client list to simulate fetching
	// from the remote cluster.
	clients.clients["1.1.1.1"] = &fakeClient{clients, "1.1.1.1",
		pb.MinionConfig{Role: masterRole}}
	clients.clients["2.2.2.2"] = &fakeClient{clients, "2.2.2.2",
		pb.MinionConfig{Role: workerRole}}

	Init(conn)
	RunOnce(conn)
	checkRoles()

	// After many runs, the roles should never change.
	for i := 0; i < 25; i++ {
		RunOnce(conn)
	}
	checkRoles()

	// Ensure that the DB machines have the correct roles as well.
	conn.Transact(func(view db.Database) error {
		machines := view.SelectFromMachine(nil)
		for _, m := range machines {
			if m.PublicIP == "1.1.1.1" {
				assert.Equal(t, db.Role(db.Master), m.Role)
			}
			if m.PublicIP == "2.2.2.2" {
				assert.Equal(t, db.Role(db.Worker), m.Role)
			}
		}
		return nil
	})
}