func syncWorker(dbcs []db.Container, dkcs []docker.Container, subnet net.IPNet) ( changed []db.Container, toBoot, toKill []interface{}) { pairs, dbci, dkci := join.Join(dbcs, dkcs, syncJoinScore) for _, i := range dkci { toKill = append(toKill, i.(docker.Container)) } for _, pair := range pairs { dbc := pair.L.(db.Container) dkc := pair.R.(docker.Container) if dbc.DockerID != dkc.ID { dbc.DockerID = dkc.ID dbc.Pid = dkc.Pid dbc.IP = dkc.IP dbc.Mac = dkc.Mac dbc.EndpointID = dkc.EID changed = append(changed, dbc) } } for _, i := range dbci { dbc := i.(db.Container) toBoot = append(toBoot, dbc) } return changed, toBoot, toKill }
func machineTxn(view db.Database, stitch stitch.Stitch) error { // XXX: How best to deal with machines that don't specify enough information? maxPrice, _ := stitch.QueryFloat("MaxPrice") stitchMachines := toDBMachine(stitch.QueryMachines(), maxPrice) dbMachines := view.SelectFromMachine(nil) scoreFun := func(left, right interface{}) int { stitchMachine := left.(db.Machine) dbMachine := right.(db.Machine) switch { case dbMachine.Provider != stitchMachine.Provider: return -1 case dbMachine.Region != stitchMachine.Region: return -1 case dbMachine.Size != "" && stitchMachine.Size != dbMachine.Size: return -1 case dbMachine.Role != db.None && dbMachine.Role != stitchMachine.Role: return -1 case dbMachine.DiskSize != stitchMachine.DiskSize: return -1 case dbMachine.PrivateIP == "": return 2 case dbMachine.PublicIP == "": return 1 default: return 0 } } pairs, bootList, terminateList := join.Join(stitchMachines, dbMachines, scoreFun) for _, toTerminate := range terminateList { toTerminate := toTerminate.(db.Machine) view.Remove(toTerminate) } for _, bootSet := range bootList { bootSet := bootSet.(db.Machine) pairs = append(pairs, join.Pair{L: bootSet, R: view.InsertMachine()}) } for _, pair := range pairs { stitchMachine := pair.L.(db.Machine) dbMachine := pair.R.(db.Machine) dbMachine.Role = stitchMachine.Role dbMachine.Size = stitchMachine.Size dbMachine.DiskSize = stitchMachine.DiskSize dbMachine.Provider = stitchMachine.Provider dbMachine.Region = stitchMachine.Region dbMachine.SSHKeys = stitchMachine.SSHKeys view.Commit(dbMachine) } return nil }
func syncDB(view db.Database, dkcsArg []docker.Container) ([]string, []db.Container) { score := func(left, right interface{}) int { dbc := left.(db.Container) dkc := right.(docker.Container) // Depending on the container, the command in the database could be // either The command plus it's arguments, or just it's arguments. To // handle that case, we check both. cmd1 := dkc.Args cmd2 := append([]string{dkc.Path}, dkc.Args...) dbcCmd := dbc.Command for key, value := range dbc.Env { if dkc.Env[key] != value { return -1 } } var dkcLabels []string for label, value := range dkc.Labels { if !docker.IsUserLabel(label) || value != docker.LabelTrueValue { continue } dkcLabels = append(dkcLabels, docker.ParseUserLabel(label)) } switch { case dkc.Image != dbc.Image: return -1 case len(dbcCmd) != 0 && !strEq(dbcCmd, cmd1) && !strEq(dbcCmd, cmd2): return -1 case dkc.ID == dbc.DockerID: return 0 default: return util.EditDistance(dbc.Labels, dkcLabels) } } pairs, dbcs, dkcs := join.Join(view.SelectFromContainer(nil), dkcsArg, score) for _, pair := range pairs { dbc := pair.L.(db.Container) dbc.DockerID = pair.R.(docker.Container).ID view.Commit(dbc) } var term []string for _, dkc := range dkcs { term = append(term, dkc.(docker.Container).ID) } var boot []db.Container for _, dbc := range dbcs { boot = append(boot, dbc.(db.Container)) } return term, boot }
func updateWorker(view db.Database, self db.Minion, store Store, etcdData storeData) { var containers []storeContainer for _, etcdc := range etcdData.containers { if etcdc.Minion == self.PrivateIP { containers = append(containers, etcdc) } } pairs, dbcs, etcdcs := join.Join(view.SelectFromContainer(nil), containers, func(left, right interface{}) int { dbc := left.(db.Container) l := storeContainer{ StitchID: dbc.StitchID, Minion: dbc.Minion, Image: dbc.Image, Command: dbc.Command, Env: dbc.Env, Labels: dbc.Labels, } return containerJoinScore(l, right.(storeContainer)) }) for _, i := range dbcs { dbc := i.(db.Container) view.Remove(dbc) } for _, etcdc := range etcdcs { pairs = append(pairs, join.Pair{ L: view.InsertContainer(), R: etcdc, }) } for _, pair := range pairs { dbc := pair.L.(db.Container) etcdc := pair.R.(storeContainer) dbc.StitchID = etcdc.StitchID dbc.Minion = etcdc.Minion dbc.Image = etcdc.Image dbc.Command = etcdc.Command dbc.Env = etcdc.Env dbc.Labels = etcdc.Labels view.Commit(dbc) } updateContainerIP(view.SelectFromContainer(nil), self.PrivateIP, store) }
func updateContainers(view db.Database, spec stitch.Stitch) { score := func(l, r interface{}) int { left := l.(db.Container) right := r.(db.Container) if left.Image != right.Image || !util.StrSliceEqual(left.Command, right.Command) || !util.StrStrMapEqual(left.Env, right.Env) { return -1 } score := util.EditDistance(left.Labels, right.Labels) if left.StitchID != right.StitchID { score++ } return score } pairs, news, dbcs := join.Join(queryContainers(spec), view.SelectFromContainer(nil), score) for _, dbc := range dbcs { view.Remove(dbc.(db.Container)) } for _, new := range news { pairs = append(pairs, join.Pair{L: new, R: view.InsertContainer()}) } for _, pair := range pairs { newc := pair.L.(db.Container) dbc := pair.R.(db.Container) // By sorting the labels we prevent the database from getting confused // when their order is non deterministic. dbc.Labels = newc.Labels sort.Sort(sort.StringSlice(dbc.Labels)) dbc.Command = newc.Command dbc.Image = newc.Image dbc.Env = newc.Env dbc.StitchID = newc.StitchID view.Commit(dbc) } }
func syncDB(cloudMachines []provider.Machine, dbMachines []db.Machine) ( pairs []join.Pair, bootSet []provider.Machine, terminateSet []provider.Machine) { scoreFun := func(left, right interface{}) int { dbm := left.(db.Machine) m := right.(provider.Machine) switch { case dbm.Provider != m.Provider: return -1 case m.Region != "" && dbm.Region != m.Region: return -1 case m.Size != "" && dbm.Size != m.Size: return -1 case m.DiskSize != 0 && dbm.DiskSize != m.DiskSize: return -1 case dbm.CloudID == m.ID: return 0 case dbm.PublicIP == m.PublicIP: return 1 case dbm.PrivateIP == m.PrivateIP: return 2 default: return 3 } } pairs, dbmIface, cmIface := join.Join(dbMachines, cloudMachines, scoreFun) for _, cm := range cmIface { m := cm.(provider.Machine) terminateSet = append(terminateSet, m) } for _, dbm := range dbmIface { m := dbm.(db.Machine) bootSet = append(bootSet, provider.Machine{ Size: m.Size, Provider: m.Provider, Region: m.Region, DiskSize: m.DiskSize, SSHKeys: m.SSHKeys}) } return pairs, bootSet, terminateSet }
// TestLogicalPorts exercises the fake OVSDB client's logical-port lifecycle:
// it creates a switch, adds two ports, lists them, and deletes them, checking
// the listed ports after every step.
func TestLogicalPorts(t *testing.T) {
	ovsdbClient := NewFakeOvsdbClient()

	// A pair matches only when both the name and the address list agree.
	scoreFun := func(left, right interface{}) int {
		ovsdbPort := left.(LPort)
		localPort := right.(LPort)
		switch {
		case ovsdbPort.Name != localPort.Name:
			return -1
		case !reflect.DeepEqual(ovsdbPort.Addresses, localPort.Addresses):
			return -1
		default:
			return 0
		}
	}

	// checkCorrectness asserts that every expected local port pairs up with
	// some listed OVSDB port.
	checkCorrectness := func(ovsdbLPorts []LPort, localLPorts ...LPort) {
		pair, _, _ := join.Join(ovsdbLPorts, localLPorts, scoreFun)
		assert.Equal(t, len(pair), len(localLPorts))
	}

	// Create new switch.
	lswitch := "test-switch"
	err := ovsdbClient.CreateLogicalSwitch(lswitch)
	assert.Nil(t, err)

	// Nothing happens yet. It should have zero logical ports to be listed.
	ovsdbLPorts, err := ovsdbClient.ListLogicalPorts(lswitch)
	assert.Nil(t, err)
	assert.Zero(t, len(ovsdbLPorts))

	// Create logical port.
	name1, mac1, ip1 := "lp1", "00:00:00:00:00:00", "0.0.0.0"
	lport1 := LPort{
		Name:      "lp1",
		Addresses: []string{fmt.Sprintf("%s %s", mac1, ip1)},
	}
	err = ovsdbClient.CreateLogicalPort(lswitch, name1, mac1, ip1)
	assert.Nil(t, err)

	// It should now have one logical port to be listed.
	ovsdbLPorts, err = ovsdbClient.ListLogicalPorts(lswitch)
	assert.Nil(t, err)
	assert.Equal(t, 1, len(ovsdbLPorts))

	ovsdbLPort1 := ovsdbLPorts[0]
	checkCorrectness(ovsdbLPorts, lport1)

	// Create a second logical port.
	name2, mac2, ip2 := "lp2", "00:00:00:00:00:01", "0.0.0.1"
	lport2 := LPort{
		Name:      "lp2",
		Addresses: []string{fmt.Sprintf("%s %s", mac2, ip2)},
	}
	err = ovsdbClient.CreateLogicalPort(lswitch, name2, mac2, ip2)
	assert.Nil(t, err)

	// It should now have two logical ports to be listed.
	ovsdbLPorts, err = ovsdbClient.ListLogicalPorts(lswitch)
	assert.Nil(t, err)
	assert.Equal(t, 2, len(ovsdbLPorts))

	checkCorrectness(ovsdbLPorts, lport1, lport2)

	// Delete the first logical port.
	err = ovsdbClient.DeleteLogicalPort(lswitch, ovsdbLPort1)
	assert.Nil(t, err)

	// It should now have one logical port to be listed.
	ovsdbLPorts, err = ovsdbClient.ListLogicalPorts(lswitch)
	assert.Nil(t, err)
	assert.Equal(t, 1, len(ovsdbLPorts))

	ovsdbLPort2 := ovsdbLPorts[0]
	checkCorrectness(ovsdbLPorts, lport2)

	// Delete the last one as well.
	err = ovsdbClient.DeleteLogicalPort(lswitch, ovsdbLPort2)
	assert.Nil(t, err)

	// It should now have zero logical ports to be listed.
	ovsdbLPorts, err = ovsdbClient.ListLogicalPorts(lswitch)
	assert.Nil(t, err)
	assert.Zero(t, len(ovsdbLPorts))
}