Example #1
// Run blocks implementing the scheduler module.
func Run(conn db.Conn, dk docker.Client) {
	bootWait(conn)

	subnet := getMinionSubnet(conn)
	err := dk.ConfigureNetwork(plugin.NetworkName, subnet)
	if err != nil {
		log.WithError(err).Fatal("Failed to configure network plugin")
	}

	loopLog := util.NewEventTimer("Scheduler")
	trig := conn.TriggerTick(60, db.MinionTable, db.ContainerTable,
		db.PlacementTable, db.EtcdTable).C
	for range trig {
		loopLog.LogStart()
		minion, err := conn.MinionSelf()
		if err != nil {
			log.WithError(err).Warn("Missing self in the minion table.")
			continue
		}

		if minion.Role == db.Worker {
			subnet = updateNetwork(conn, dk, subnet)
			runWorker(conn, dk, minion.PrivateIP, subnet)
		} else if minion.Role == db.Master {
			runMaster(conn)
		}
		loopLog.LogEnd()
	}
}
Example #2
// watchLeader keeps the LeaderIP column of the etcd table in sync with the
// leader key held in the store.
func watchLeader(conn db.Conn, store Store) {
	tickRate := electionTTL
	if tickRate > 30 {
		tickRate = 30
	}

	watch := store.Watch(leaderKey, 1*time.Second)
	trigg := conn.TriggerTick(tickRate, db.EtcdTable)
	for {
		leader, _ := store.Get(leaderKey)
		conn.Transact(func(view db.Database) error {
			etcdRows := view.SelectFromEtcd(nil)
			if len(etcdRows) == 1 {
				etcdRows[0].LeaderIP = leader
				view.Commit(etcdRows[0])
			}
			return nil
		})

		select {
		case <-watch:
		case <-trigg.C:
		}
	}
}
Example #3
// syncAuthorizedKeys re-syncs the authorized keys whenever the minion table
// changes.
func syncAuthorizedKeys(conn db.Conn) {
	waitForMinion(conn)
	for range conn.TriggerTick(30, db.MinionTable).C {
		if err := runOnce(conn); err != nil {
			log.WithError(err).Error("Failed to sync keys")
		}
	}
}
Example #4
// Run continually checks 'conn' for cluster changes and recreates the cluster as
// needed.
func Run(conn db.Conn) {
	var clst *cluster
	for range conn.TriggerTick(30, db.ClusterTable, db.MachineTable, db.ACLTable).C {
		clst = updateCluster(conn, clst)

		// Somewhat of a crude rate-limit of once every five seconds to avoid
		// stressing out the cloud providers with too many API calls.
		sleep(5 * time.Second)
	}
}
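The lowercase sleep in the loop above suggests an injectable delay rather than a direct call to time.Sleep. A minimal sketch of the assumed package-level declaration (it is not shown in the snippet):

// sleep is assumed to be a package-level variable rather than a direct
// time.Sleep call, presumably so tests can stub out the five-second delay.
var sleep = time.Sleep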
Example #5
// Run blocks implementing the network services.
func Run(conn db.Conn, dk docker.Client) {
	loopLog := util.NewEventTimer("Network")
	for range conn.TriggerTick(30, db.MinionTable, db.ContainerTable,
		db.ConnectionTable, db.LabelTable, db.EtcdTable).C {

		loopLog.LogStart()
		runWorker(conn, dk)
		runMaster(conn)
		loopLog.LogEnd()
	}
}
Example #6
// campaign repeatedly tries to acquire or refresh the leader key on behalf of
// this minion while it is the master, recording the outcome in the etcd table.
func campaign(conn db.Conn, store Store) {
	watch := store.Watch(leaderKey, 1*time.Second)
	trigg := conn.TriggerTick(electionTTL/2, db.EtcdTable)
	oldMaster := false

	for {
		select {
		case <-watch:
		case <-trigg.C:
		}

		etcdRows := conn.SelectFromEtcd(nil)

		minion, err := conn.MinionSelf()
		master := err == nil && minion.Role == db.Master && len(etcdRows) == 1

		if !master {
			if oldMaster {
				commitLeader(conn, false, "")
			}
			oldMaster = false
			continue
		}
		oldMaster = true

		IP := minion.PrivateIP
		if IP == "" {
			continue
		}

		ttl := electionTTL * time.Second

		if etcdRows[0].Leader {
			err = store.Update(leaderKey, IP, ttl)
		} else {
			err = store.Create(leaderKey, IP, ttl)
		}

		if err == nil {
			commitLeader(conn, true, IP)
		} else {
			clientErr, ok := err.(client.Error)
			if !ok || clientErr.Code != client.ErrorCodeNodeExist {
				log.WithError(err).Warn("Error setting leader key")
				commitLeader(conn, false, "")

				// Give things a chance to settle down.
				time.Sleep(electionTTL * time.Second)
			} else {
				commitLeader(conn, false, "")
			}
		}
	}
}
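commitLeader is called throughout the loop above but is not shown. A minimal sketch of what it plausibly does, reusing the transaction pattern from watchLeader in Example #2 (the real signature and behavior may differ):

// Sketch only: record in the etcd table whether this minion currently holds
// the leader key, and under which IP.
func commitLeader(conn db.Conn, leader bool, ip string) {
	conn.Transact(func(view db.Database) error {
		etcdRows := view.SelectFromEtcd(nil)
		if len(etcdRows) == 1 {
			etcdRows[0].Leader = leader
			etcdRows[0].LeaderIP = ip
			view.Commit(etcdRows[0])
		}
		return nil
	})
}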
Example #7
// Run blocks implementing the network services.
func Run(conn db.Conn, dk docker.Client) {
	for {
		odb, err := ovsdb.Open()
		if err == nil {
			odb.Close()
			break
		}
		log.WithError(err).Debug("Could not connect to ovsdb-server.")
		time.Sleep(5 * time.Second)
	}

	for range conn.TriggerTick(30, db.MinionTable, db.ContainerTable,
		db.ConnectionTable, db.LabelTable, db.EtcdTable).C {
		runWorker(conn, dk)
		runMaster(conn)
	}
}
Example #8
// Run blocks implementing the scheduler module.
func Run(conn db.Conn) {
	var sched scheduler
	for range conn.TriggerTick(30, db.MinionTable, db.EtcdTable, db.ContainerTable,
		db.PlacementTable).C {
		minion, err := conn.MinionSelf()
		if err != nil || !conn.EtcdLeader() || minion.Role != db.Master ||
			minion.PrivateIP == "" {
			sched = nil
			continue
		}

		if sched == nil {
			ip := minion.PrivateIP
			sched = newSwarm(docker.New(fmt.Sprintf("tcp://%s:2377", ip)))
			time.Sleep(60 * time.Second)
		}

		placements := conn.SelectFromPlacement(nil)
		connections := conn.SelectFromConnection(nil)
		// Each time we run through this loop, we may boot or terminate
		// containers.  These modifications should, in turn, be reflected in the
		// database.  For this reason, we attempt to sync until no
		// database modifications happen (up to an arbitrary limit of three
		// tries).
		for i := 0; i < 3; i++ {
			dkc, err := sched.list()
			if err != nil {
				log.WithError(err).Warning("Failed to get containers.")
				break
			}

			var boot []db.Container
			var term []string
			conn.Transact(func(view db.Database) error {
				term, boot = syncDB(view, dkc)
				return nil
			})

			if len(term) == 0 && len(boot) == 0 {
				break
			}
			sched.terminate(term)
			sched.boot(boot, placements, connections)
		}
	}
}
Example #9
// newCluster creates a cluster for the given namespace, connecting to every
// cloud provider that accepts a connection.
func newCluster(conn db.Conn, namespace string) *cluster {
	clst := &cluster{
		conn:      conn,
		trigger:   conn.TriggerTick(30, db.ClusterTable, db.MachineTable),
		fm:        createForeman(conn),
		namespace: namespace,
		providers: make(map[db.Provider]provider.Provider),
	}

	for _, p := range allProviders {
		inst := provider.New(p)
		if err := inst.Connect(namespace); err == nil {
			clst.providers[p] = inst
		} else {
			log.Debugf("Failed to connect to provider %s: %s", p, err)
		}
	}

	return clst
}
Example #10
// Run continually checks 'conn' for cluster changes and recreates the cluster as
// needed.
func Run(conn db.Conn) {
	var clst *cluster
	for range conn.TriggerTick(60, db.ClusterTable).C {
		var dbCluster db.Cluster
		err := conn.Transact(func(db db.Database) error {
			var err error
			dbCluster, err = db.GetCluster()
			return err
		})

		if err == nil && (clst == nil || clst.namespace != dbCluster.Namespace) {
			if clst != nil {
				clst.fm.stop()
				clst.trigger.Stop()
			}
			clst = newCluster(conn, dbCluster.Namespace)
			go clst.listen()
		}
	}
}
Example #11
// wakeChan collapses the various channels these functions wait on into a single
// channel. Multiple redundant pings will be coalesced into a single message.
func wakeChan(conn db.Conn, store Store) chan struct{} {
	minionWatch := store.Watch(minionDir, 1*time.Second)
	trigg := conn.TriggerTick(30, db.MinionTable, db.ContainerTable, db.LabelTable,
		db.EtcdTable).C

	c := make(chan struct{}, 1)
	go func() {
		for {
			select {
			case <-minionWatch:
			case <-trigg:
			}

			select {
			case c <- struct{}{}:
			default: // There's a notification in queue, no need for another.
			}
		}
	}()

	return c
}
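The coalescing behavior described in the comment is easiest to see from the consumer side. A short hypothetical usage sketch (syncLoop and doSync are assumed names, not part of the example):

// Each receive corresponds to at least one ping; a burst of redundant
// triggers collapses into a single sync pass.
func syncLoop(conn db.Conn, store Store) {
	for range wakeChan(conn, store) {
		doSync(conn, store) // doSync is an assumed helper.
	}
}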
Example #12
// Run updates the database in response to stitch changes in the cluster table.
func Run(conn db.Conn) {
	for range conn.TriggerTick(30, db.ClusterTable, db.MachineTable, db.ACLTable).C {
		conn.Transact(updateTxn)
	}
}