// Run blocks implementing the scheduler module.
func Run(conn db.Conn, dk docker.Client) {
	bootWait(conn)

	subnet := getMinionSubnet(conn)
	err := dk.ConfigureNetwork(plugin.NetworkName, subnet)
	if err != nil {
		log.WithError(err).Fatal("Failed to configure network plugin")
	}

	loopLog := util.NewEventTimer("Scheduler")
	trig := conn.TriggerTick(60, db.MinionTable, db.ContainerTable,
		db.PlacementTable, db.EtcdTable).C
	for range trig {
		loopLog.LogStart()

		minion, err := conn.MinionSelf()
		if err != nil {
			log.WithError(err).Warn("Missing self in the minion table.")
			continue
		}

		if minion.Role == db.Worker {
			subnet = updateNetwork(conn, dk, subnet)
			runWorker(conn, dk, minion.PrivateIP, subnet)
		} else if minion.Role == db.Master {
			runMaster(conn)
		}

		loopLog.LogEnd()
	}
}
// waitForMinion blocks until this minion's row appears in the minion table.
func waitForMinion(conn db.Conn) {
	for {
		if _, err := conn.MinionSelf(); err == nil {
			return
		}
		time.Sleep(500 * time.Millisecond)
	}
}
// campaign runs the etcd leader election loop: master minions repeatedly try
// to create (or refresh) the leader key, and record the outcome in the
// database.
func campaign(conn db.Conn, store Store) {
	watch := store.Watch(leaderKey, 1*time.Second)
	trigg := conn.TriggerTick(electionTTL/2, db.EtcdTable)
	oldMaster := false

	for {
		select {
		case <-watch:
		case <-trigg.C:
		}

		etcdRows := conn.SelectFromEtcd(nil)
		minion, err := conn.MinionSelf()
		master := err == nil && minion.Role == db.Master && len(etcdRows) == 1
		if !master {
			if oldMaster {
				// We just lost master status; relinquish leadership.
				commitLeader(conn, false, "")
			}
			oldMaster = false
			continue
		}
		oldMaster = true

		IP := minion.PrivateIP
		if IP == "" {
			continue
		}

		ttl := electionTTL * time.Second
		if etcdRows[0].Leader {
			err = store.Update(leaderKey, IP, ttl)
		} else {
			err = store.Create(leaderKey, IP, ttl)
		}

		if err == nil {
			commitLeader(conn, true, IP)
		} else {
			clientErr, ok := err.(client.Error)
			if !ok || clientErr.Code != client.ErrorCodeNodeExist {
				log.WithError(err).Warn("Error setting leader key")
				commitLeader(conn, false, "")

				// Give things a chance to settle down.
				time.Sleep(electionTTL * time.Second)
			} else {
				commitLeader(conn, false)
			}
		}
	}
}
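// commitLeader is called above but not defined in this section. Below is a
// minimal sketch of what it plausibly does, assuming the EtcdTable row carries
// Leader and LeaderIP columns; the variadic ip parameter is a guess that
// reconciles the two call shapes seen above, commitLeader(conn, false) and
// commitLeader(conn, true, IP).
func commitLeader(conn db.Conn, leader bool, ip ...string) {
	conn.Transact(func(view db.Database) error {
		etcdRows := view.SelectFromEtcd(nil)
		if len(etcdRows) == 1 {
			etcdRows[0].Leader = leader
			if len(ip) == 1 {
				// Only overwrite LeaderIP when a value was supplied.
				etcdRows[0].LeaderIP = ip[0]
			}
			view.Commit(etcdRows[0])
		}
		return nil
	})
}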
// Run blocks implementing the scheduler module.
func Run(conn db.Conn) {
	var sched scheduler
	for range conn.TriggerTick(30, db.MinionTable, db.EtcdTable,
		db.ContainerTable, db.PlacementTable).C {
		minion, err := conn.MinionSelf()
		if err != nil || !conn.EtcdLeader() || minion.Role != db.Master ||
			minion.PrivateIP == "" {
			sched = nil
			continue
		}

		if sched == nil {
			ip := minion.PrivateIP
			sched = newSwarm(docker.New(fmt.Sprintf("tcp://%s:2377", ip)))
			time.Sleep(60 * time.Second)
		}

		placements := conn.SelectFromPlacement(nil)
		connections := conn.SelectFromConnection(nil)

		// Each time we run through this loop, we may boot or terminate
		// containers. These modifications should, in turn, be reflected
		// in the database. For this reason, we attempt to sync until no
		// database modifications happen (up to an arbitrary limit of
		// three tries).
		for i := 0; i < 3; i++ {
			dkc, err := sched.list()
			if err != nil {
				log.WithError(err).Warning("Failed to get containers.")
				break
			}

			var boot []db.Container
			var term []string
			conn.Transact(func(view db.Database) error {
				term, boot = syncDB(view, dkc)
				return nil
			})

			if len(term) == 0 && len(boot) == 0 {
				break
			}
			sched.terminate(term)
			sched.boot(boot, placements, connections)
		}
	}
}
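// syncDB is used above but not shown in this section. The sketch below is an
// assumption about its contract, not the actual implementation: it diffs the
// containers recorded in the database against what Docker reports, returning
// the Docker IDs to terminate and the database rows to boot. The
// docker.Container type with an ID field is also assumed here.
func syncDB(view db.Database, dkcs []docker.Container) (term []string,
	boot []db.Container) {
	dbcs := view.SelectFromContainer(nil)

	// Docker IDs the database believes are running.
	dbIDs := map[string]bool{}
	for _, dbc := range dbcs {
		if dbc.DockerID != "" {
			dbIDs[dbc.DockerID] = true
		}
	}

	// Terminate Docker containers the database doesn't know about.
	running := map[string]bool{}
	for _, dkc := range dkcs {
		running[dkc.ID] = true
		if !dbIDs[dkc.ID] {
			term = append(term, dkc.ID)
		}
	}

	// Boot database rows with no corresponding Docker container, clearing
	// stale DockerIDs so the write is visible to the next sync pass.
	for _, dbc := range dbcs {
		if dbc.DockerID == "" || !running[dbc.DockerID] {
			dbc.DockerID = ""
			view.Commit(dbc)
			boot = append(boot, dbc)
		}
	}
	return term, boot
}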
// checkMinionEquals polls for up to a second waiting for the minion table's
// self row to equal exp, and fails the test otherwise.
func checkMinionEquals(t *testing.T, conn db.Conn, exp db.Minion) {
	timeout := time.After(1 * time.Second)
	var actual db.Minion
	for {
		actual, _ = conn.MinionSelf()
		actual.ID = 0
		if reflect.DeepEqual(exp, actual) {
			return
		}

		select {
		case <-timeout:
			t.Errorf("Expected minion to be %v, but got %v", exp, actual)
			return
		default:
			time.Sleep(100 * time.Millisecond)
		}
	}
}
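// A hypothetical use of the helper above; the test name, db.New, InsertMinion,
// and the Self field are assumptions drawn from the db.Conn API used
// throughout, not from this section. Insert the expected row, then poll until
// MinionSelf reflects it.
func TestCheckMinionEquals(t *testing.T) {
	conn := db.New()
	exp := db.Minion{Role: db.Worker, PrivateIP: "10.0.0.1", Self: true}

	conn.Transact(func(view db.Database) error {
		m := view.InsertMinion()
		id := m.ID
		m = exp
		m.ID = id
		view.Commit(m)
		return nil
	})
	checkMinionEquals(t, conn, exp)
}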
// runWorker configures the network stack on worker minions: namespaces,
// veths, NAT, OVS ports, OpenFlow rules, DNS, and routes.
func runWorker(conn db.Conn, dk docker.Client) {
	minion, err := conn.MinionSelf()
	if err != nil || minion.Role != db.Worker {
		return
	}

	odb, err := ovsdb.Open()
	if err != nil {
		log.WithError(err).Warning("Failed to connect to ovsdb-server")
		return
	}
	defer odb.Close()

	var labels []db.Label
	var containers []db.Container
	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		containers = view.SelectFromContainer(func(c db.Container) bool {
			return c.DockerID != "" && c.IP != "" && c.Mac != ""
		})
		labels = view.SelectFromLabel(func(l db.Label) bool {
			return l.IP != ""
		})
		connections = view.SelectFromConnection(nil)
		return nil
	})

	updateNamespaces(containers)
	updateVeths(containers)
	updateNAT(containers, connections)
	updatePorts(odb, containers)

	if exists, err := linkExists("", quiltBridge); exists {
		updateDefaultGw(odb)
		updateOpenFlow(dk, odb, containers, labels, connections)
	} else if err != nil {
		log.WithError(err).Error("Failed to check if link exists")
	}

	updateNameservers(dk, containers)
	updateContainerIPs(containers, labels)
	updateRoutes(containers)
	updateEtcHosts(dk, containers, labels, connections)
	updateLoopback(containers)
}
// getMinionSubnet blocks until this minion has a valid subnet assigned, and
// returns it.
func getMinionSubnet(conn db.Conn) net.IPNet {
	for {
		minion, err := conn.MinionSelf()
		if err != nil {
			log.WithError(err).Debug("Failed to get self")
		} else if minion.PrivateIP == "" {
			log.Error("This minion has no PrivateIP")
		} else if minion.Subnet == "" {
			log.Debug("This minion has no subnet yet")
		} else {
			_, subnet, err := net.ParseCIDR(minion.Subnet)
			if err == nil {
				return *subnet
			}
			// Don't dereference subnet on a parse error: retry instead.
			log.WithError(err).Errorf("Malformed subnet: %s", minion.Subnet)
		}
		time.Sleep(50 * time.Millisecond)
	}
}
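// Note that net.ParseCIDR masks the parsed address down to its network, which
// is why getMinionSubnet returns the *net.IPNet rather than echoing the raw
// string. A quick illustration (hypothetical values):
//
//	_, subnet, _ := net.ParseCIDR("10.2.3.4/16")
//	fmt.Println(subnet) // "10.2.0.0/16"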
// runOnce ensures the contents of authorizedKeysFile match the minion's
// AuthorizedKeys column, creating the file if it doesn't exist.
func runOnce(conn db.Conn) error {
	if _, err := util.AppFs.Stat(authorizedKeysFile); os.IsNotExist(err) {
		util.AppFs.Create(authorizedKeysFile)
	}

	currKeys, err := util.ReadFile(authorizedKeysFile)
	if err != nil {
		return err
	}

	m, err := conn.MinionSelf()
	if err != nil {
		return err
	}

	if m.AuthorizedKeys == currKeys {
		return nil
	}
	return util.WriteFile(authorizedKeysFile, []byte(m.AuthorizedKeys), 0644)
}
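// runOnce has no driver in this section. A sketch of how it might be wired up,
// following the TriggerTick pattern the other modules use (the Run name and
// 30-second interval are illustrative, not from the source):
func Run(conn db.Conn) {
	for range conn.TriggerTick(30, db.MinionTable).C {
		if err := runOnce(conn); err != nil {
			log.WithError(err).Error("Failed to sync authorized_keys")
		}
	}
}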