Example #1
// Init is called the first time the foreman operates on a new namespace.  It queries
// the currently running VMs for their previously assigned roles and writes them to the
// database.
func Init(conn db.Conn) {
	for _, m := range minions {
		m.client.Close()
	}
	minions = map[string]*minion{}

	conn.Transact(func(view db.Database) error {
		machines := view.SelectFromMachine(func(m db.Machine) bool {
			return m.PublicIP != "" && m.PrivateIP != "" && m.CloudID != ""
		})

		updateMinionMap(machines)

		forEachMinion(func(m *minion) {
			var err error
			m.config, err = m.client.getMinion()
			m.connected = err == nil
		})

		for _, m := range minions {
			role := db.PBToRole(m.config.Role)
			if m.connected && role != db.None {
				m.machine.Role = role
				m.machine.Connected = m.connected
				view.Commit(m.machine)
			}
		}

		return nil
	})
}
Example #2
// Run blocks implementing the scheduler module.
func Run(conn db.Conn, dk docker.Client) {
	bootWait(conn)

	subnet := getMinionSubnet(conn)
	err := dk.ConfigureNetwork(plugin.NetworkName, subnet)
	if err != nil {
		log.WithError(err).Fatal("Failed to configure network plugin")
	}

	loopLog := util.NewEventTimer("Scheduler")
	trig := conn.TriggerTick(60, db.MinionTable, db.ContainerTable,
		db.PlacementTable, db.EtcdTable).C
	for range trig {
		loopLog.LogStart()
		minion, err := conn.MinionSelf()
		if err != nil {
			log.WithError(err).Warn("Missing self in the minion table.")
			continue
		}

		if minion.Role == db.Worker {
			subnet = updateNetwork(conn, dk, subnet)
			runWorker(conn, dk, minion.PrivateIP, subnet)
		} else if minion.Role == db.Master {
			runMaster(conn)
		}
		loopLog.LogEnd()
	}
}
Example #3
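// testContainerTxn applies the compiled spec to the database and asserts that the
// resulting container rows match the spec's containers exactly.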
func testContainerTxn(t *testing.T, conn db.Conn, spec string) {
	compiled, err := stitch.FromJavascript(spec, stitch.DefaultImportGetter)
	assert.Nil(t, err)

	var containers []db.Container
	conn.Transact(func(view db.Database) error {
		updatePolicy(view, db.Master, compiled.String())
		containers = view.SelectFromContainer(nil)
		return nil
	})

	for _, e := range queryContainers(compiled) {
		found := false
		for i, c := range containers {
			if e.Image == c.Image &&
				reflect.DeepEqual(e.Command, c.Command) &&
				util.EditDistance(c.Labels, e.Labels) == 0 {
				containers = append(containers[:i], containers[i+1:]...)
				found = true
				break
			}
		}

		assert.True(t, found)
	}

	assert.Empty(t, containers)
}
Example #4
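// selectACL reads the ACL row from the database inside a transaction.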
func selectACL(conn db.Conn) (acl db.ACL, err error) {
	err = conn.Transact(func(view db.Database) error {
		acl, err = view.GetACL()
		return err
	})
	return
}
Example #5
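// watchLeader polls the store for the current leader and records its IP in the etcd row.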
func watchLeader(conn db.Conn, store Store) {
	tickRate := electionTTL
	if tickRate > 30 {
		tickRate = 30
	}

	watch := store.Watch(leaderKey, 1*time.Second)
	trigg := conn.TriggerTick(tickRate, db.EtcdTable)
	for {
		leader, _ := store.Get(leaderKey)
		conn.Transact(func(view db.Database) error {
			etcdRows := view.SelectFromEtcd(nil)
			if len(etcdRows) == 1 {
				etcdRows[0].LeaderIP = leader
				view.Commit(etcdRows[0])
			}
			return nil
		})

		select {
		case <-watch:
		case <-trigg.C:
		}
	}
}
Example #6
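// runNetworkWorker syncs the label and container directories in the cluster store into
// the database each time it is woken up.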
func runNetworkWorker(conn db.Conn, store Store) {
	// If the directories don't exist, create them so we can watch them.  If they
	// already exist, these calls return an error that we don't log, but that's
	// fine because the read in the loop below will fail and log it instead.
	store.Mkdir(labelDir)
	store.Mkdir(containerDir)

	for range wakeChan(conn, store) {
		labelDir, err := getDirectory(store, labelDir)
		containerDir, err2 := getDirectory(store, containerDir)
		if err2 != nil {
			err = err2
		}

		if err != nil {
			log.WithError(err).Warn("Failed to read from cluster store.")
			continue
		}

		conn.Transact(func(view db.Database) error {
			readContainerTransact(view, containerDir)
			readLabelTransact(view, labelDir)
			return nil
		})
	}
}
Example #7
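// testConnectionTxn applies the compiled spec to the database and asserts that the
// resulting connection rows match the spec's connections exactly.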
func testConnectionTxn(t *testing.T, conn db.Conn, spec string) {
	compiled, err := stitch.FromJavascript(spec, stitch.DefaultImportGetter)
	assert.Nil(t, err)

	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		updatePolicy(view, db.Master, compiled.String())
		connections = view.SelectFromConnection(nil)
		return nil
	})

	exp := compiled.Connections
	for _, e := range exp {
		found := false
		for i, c := range connections {
			if e.From == c.From && e.To == c.To && e.MinPort == c.MinPort &&
				e.MaxPort == c.MaxPort {
				connections = append(
					connections[:i], connections[i+1:]...)
				found = true
				break
			}
		}

		assert.True(t, found)
	}

	assert.Empty(t, connections)
}
Example #8
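// runMaster places containers if this minion is the etcd leader.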
func runMaster(conn db.Conn) {
	conn.Transact(func(view db.Database) error {
		if view.EtcdLeader() {
			placeContainers(view)
		}
		return nil
	})
}
Example #9
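// syncAuthorizedKeys periodically syncs the authorized keys file with the minion table.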
func syncAuthorizedKeys(conn db.Conn) {
	waitForMinion(conn)
	for range conn.TriggerTick(30, db.MinionTable).C {
		if err := runOnce(conn); err != nil {
			log.WithError(err).Error("Failed to sync keys")
		}
	}
}
Example #10
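// waitForMinion blocks until this minion appears in the minion table.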
func waitForMinion(conn db.Conn) {
	for {
		if _, err := conn.MinionSelf(); err == nil {
			return
		}
		time.Sleep(500 * time.Millisecond)
	}
}
Example #11
// Run continually checks 'conn' for cluster changes and recreates the cluster as
// needed.
func Run(conn db.Conn) {
	var clst *cluster
	for range conn.TriggerTick(30, db.ClusterTable, db.MachineTable, db.ACLTable).C {
		clst = updateCluster(conn, clst)

		// Somewhat of a crude rate-limit of once every five seconds to avoid
		// stressing out the cloud providers with too many API calls.
		sleep(5 * time.Second)
	}
}
Example #12
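// runNetwork syncs container and label state between etcd and the database, with the
// leader performing the etcd writes.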
func runNetwork(conn db.Conn, store Store) {
	for range wakeChan(conn, store) {
		// If the etcd read failed, we only want to update the db if it
		// failed because a key was missing (has not been created yet).
		// In all other cases, we skip this iteration.
		etcdData, err := readEtcd(store)
		if err != nil {
			etcdErr, ok := err.(client.Error)
			if !ok || etcdErr.Code != client.ErrorCodeKeyNotFound {
				log.WithError(err).Error("Etcd transaction failed.")
				continue
			}
			log.WithError(err).Debug()
		}

		leader := false
		var containers []db.Container
		conn.Transact(func(view db.Database) error {
			leader = view.EtcdLeader()
			containers = view.SelectFromContainer(func(c db.Container) bool {
				return c.Minion != ""
			})

			minion, err := view.MinionSelf()
			if err == nil && minion.Role == db.Worker {
				updateWorker(view, minion, store, etcdData)
			}

			ipMap, err := loadMinionIPs(store)
			if err != nil {
				log.WithError(err).Error("Etcd read minion IPs failed")
				return nil
			}

			// It would likely be more efficient to perform the etcd write
			// outside of the DB transact. But, if we perform the writes
			// after the transact, there is no way to ensure that the writes
			// were successful before updating the DB with the information
			// produced by the updateEtcd* functions (not considering the
			// etcd writes they perform).
			if leader {
				etcdData, err = updateEtcd(store, etcdData, containers)
				if err != nil {
					log.WithError(err).Error("Etcd update failed.")
					return nil
				}

				updateLeaderDBC(view, containers, etcdData, ipMap)
			}

			updateDBLabels(view, etcdData, ipMap)
			return nil
		})
	}
}
Example #13
// Run blocks implementing the network services.
func Run(conn db.Conn, dk docker.Client) {
	loopLog := util.NewEventTimer("Network")
	for range conn.TriggerTick(30, db.MinionTable, db.ContainerTable,
		db.ConnectionTable, db.LabelTable, db.EtcdTable).C {

		loopLog.LogStart()
		runWorker(conn, dk)
		runMaster(conn)
		loopLog.LogEnd()
	}
}
Example #14
// UpdatePolicy executes transactions on 'conn' to make it reflect a new policy,
// 'stitch'.
func UpdatePolicy(conn db.Conn, stitch stitch.Stitch) error {
	txn := func(db db.Database) error {
		return updateTxn(db, stitch)
	}

	if err := conn.Transact(txn); err != nil {
		return err
	}

	return nil
}
Example #15
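// bootWait blocks until at least one worker minion appears in the database.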
func bootWait(conn db.Conn) {
	for workerCount := 0; workerCount <= 0; {
		workerCount = 0
		for _, m := range conn.SelectFromMinion(nil) {
			if m.Role == db.Worker {
				workerCount++
			}
		}
		time.Sleep(30 * time.Second)
	}
}
Example #16
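// setNamespace writes the given namespace to the cluster row, inserting the row if it
// doesn't exist yet.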
func setNamespace(conn db.Conn, ns string) {
	conn.Transact(func(view db.Database) error {
		clst, err := view.GetCluster()
		if err != nil {
			clst = view.InsertCluster()
		}

		clst.Namespace = ns
		view.Commit(clst)
		return nil
	})
}
Example #17
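// updateStitch commits the given stitch spec to the cluster row and applies the
// resulting policy.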
func updateStitch(t *testing.T, conn db.Conn, stitch stitch.Stitch) {
	conn.Transact(func(view db.Database) error {
		cluster, err := view.GetCluster()
		if err != nil {
			cluster = view.InsertCluster()
		}
		cluster.Spec = stitch.String()
		view.Commit(cluster)
		return nil
	})
	assert.Nil(t, conn.Transact(updateTxn))
}
Example #18
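// selectMachines returns the master and worker machines currently in the database.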
func selectMachines(conn db.Conn) (masters, workers []db.Machine) {
	conn.Transact(func(view db.Database) error {
		masters = view.SelectFromMachine(func(m db.Machine) bool {
			return m.Role == db.Master
		})
		workers = view.SelectFromMachine(func(m db.Machine) bool {
			return m.Role == db.Worker
		})
		return nil
	})
	return
}
Example #19
// Run blocks implementing the network services.
func Run(conn db.Conn, dk docker.Client) {
	for {
		odb, err := ovsdb.Open()
		if err == nil {
			odb.Close()
			break
		}
		log.WithError(err).Debug("Could not connect to ovsdb-server.")
		time.Sleep(5 * time.Second)
	}

	for range conn.TriggerTick(30, db.MinionTable, db.ContainerTable,
		db.ConnectionTable, db.LabelTable, db.EtcdTable).C {
		runWorker(conn, dk)
		runMaster(conn)
	}
}
Example #20
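// updateCluster recreates the cluster if the namespace has changed, then runs one
// cluster and foreman sync iteration.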
func updateCluster(conn db.Conn, clst *cluster) *cluster {
	namespace, err := conn.GetClusterNamespace()
	if err != nil {
		return clst
	}

	if clst == nil || clst.namespace != namespace {
		clst = newCluster(conn, namespace)
		clst.runOnce()
		foreman.Init(clst.conn)
	}

	clst.runOnce()
	foreman.RunOnce(clst.conn)

	return clst
}
Example #21
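// checkMinionEquals polls for up to a second, waiting for the minion row to equal exp.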
func checkMinionEquals(t *testing.T, conn db.Conn, exp db.Minion) {
	timeout := time.After(1 * time.Second)
	var actual db.Minion
	for {
		actual, _ = conn.MinionSelf()
		actual.ID = 0
		if reflect.DeepEqual(exp, actual) {
			return
		}
		select {
		case <-timeout:
			t.Errorf("Expected minion to be %v, but got %v\n", exp, actual)
			return
		default:
			time.Sleep(100 * time.Millisecond)
		}
	}
}
Example #22
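// runWorker updates the worker's network state (namespaces, veths, NAT, ports, OpenFlow,
// DNS, routes) to reflect the containers, labels, and connections in the database.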
func runWorker(conn db.Conn, dk docker.Client) {
	minion, err := conn.MinionSelf()
	if err != nil || minion.Role != db.Worker {
		return
	}

	odb, err := ovsdb.Open()
	if err != nil {
		log.WithError(err).Warning("Failed to connect to ovsdb-server.")
		return
	}
	defer odb.Close()

	var labels []db.Label
	var containers []db.Container
	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		containers = view.SelectFromContainer(func(c db.Container) bool {
			return c.DockerID != "" && c.IP != "" && c.Mac != ""
		})
		labels = view.SelectFromLabel(func(l db.Label) bool {
			return l.IP != ""
		})
		connections = view.SelectFromConnection(nil)
		return nil
	})

	updateNamespaces(containers)
	updateVeths(containers)
	updateNAT(containers, connections)
	updatePorts(odb, containers)

	if exists, err := linkExists("", quiltBridge); exists {
		updateDefaultGw(odb)
		updateOpenFlow(dk, odb, containers, labels, connections)
	} else if err != nil {
		log.WithError(err).Error("failed to check if link exists")
	}
	updateNameservers(dk, containers)
	updateContainerIPs(containers, labels)
	updateRoutes(containers)
	updateEtcHosts(dk, containers, labels, connections)
	updateLoopback(containers)
}
Example #23
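// commitLeader records whether this minion is the leader, and optionally the leader IP,
// in the etcd row.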
func commitLeader(conn db.Conn, leader bool, ip ...string) {
	if len(ip) > 1 {
		panic("Not Reached")
	}

	conn.Transact(func(view db.Database) error {
		etcdRows := view.SelectFromEtcd(nil)
		if len(etcdRows) == 1 {
			etcdRows[0].Leader = leader

			if len(ip) == 1 {
				etcdRows[0].LeaderIP = ip[0]
			}

			view.Commit(etcdRows[0])
		}
		return nil
	})
}
Example #24
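// newCluster creates a cluster for the given namespace and connects to each available
// cloud provider.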
func newCluster(conn db.Conn, namespace string) *cluster {
	clst := &cluster{
		conn:      conn,
		trigger:   conn.TriggerTick(30, db.ClusterTable, db.MachineTable),
		fm:        createForeman(conn),
		namespace: namespace,
		providers: make(map[db.Provider]provider.Provider),
	}

	for _, p := range allProviders {
		inst := provider.New(p)
		if err := inst.Connect(namespace); err == nil {
			clst.providers[p] = inst
		} else {
			log.Debugf("Failed to connect to provider %s: %s", p, err)
		}
	}

	return clst
}
Example #25
// Run continually checks 'conn' for cluster changes and recreates the cluster as
// needed.
func Run(conn db.Conn) {
	var clst *cluster
	for range conn.TriggerTick(60, db.ClusterTable).C {
		var dbCluster db.Cluster
		err := conn.Transact(func(db db.Database) error {
			var err error
			dbCluster, err = db.GetCluster()
			return err
		})

		if err == nil && (clst == nil || clst.namespace != dbCluster.Namespace) {
			if clst != nil {
				clst.fm.stop()
				clst.trigger.Stop()
			}
			clst = newCluster(conn, dbCluster.Namespace)
			go clst.listen()
		}
	}
}
Example #26
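// getMinionSubnet blocks until this minion has a parseable subnet and returns it.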
func getMinionSubnet(conn db.Conn) net.IPNet {
	for {
		minion, err := conn.MinionSelf()
		if err != nil {
			log.WithError(err).Debug("Failed to get self")
		} else if minion.PrivateIP == "" {
			log.Error("This minion has no PrivateIP")
		} else if minion.Subnet == "" {
			log.Debug("This minion has no subnet yet")
		} else {
			_, subnet, err := net.ParseCIDR(minion.Subnet)
			if err == nil {
				return *subnet
			}
			log.WithError(err).Errorf("Malformed subnet: %s",
				minion.Subnet)
		}
		time.Sleep(50 * time.Millisecond)
	}
}
Example #27
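// runOnce writes the minion's authorized keys to the authorized keys file if they differ
// from its current contents.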
func runOnce(conn db.Conn) error {
	if _, err := util.AppFs.Stat(authorizedKeysFile); os.IsNotExist(err) {
		util.AppFs.Create(authorizedKeysFile)
	}
	currKeys, err := util.ReadFile(authorizedKeysFile)
	if err != nil {
		return err
	}

	m, err := conn.MinionSelf()
	if err != nil {
		return err
	}

	if m.AuthorizedKeys == currKeys {
		return nil
	}

	return util.WriteFile(authorizedKeysFile, []byte(m.AuthorizedKeys), 0644)
}
Example #28
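// runWorker syncs the Docker containers on this worker with the containers assigned to
// it in the database, booting and killing containers as needed.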
func runWorker(conn db.Conn, dk docker.Client, myIP string, subnet net.IPNet) {
	if myIP == "" {
		return
	}

	filter := map[string][]string{"label": {labelPair}}

	var toBoot, toKill []interface{}
	for i := 0; i < 2; i++ {
		dkcs, err := dk.List(filter)
		if err != nil {
			log.WithError(err).Warning("Failed to list docker containers.")
			return
		}

		conn.Transact(func(view db.Database) error {
			_, err := view.MinionSelf()
			if err != nil {
				return nil
			}

			dbcs := view.SelectFromContainer(func(dbc db.Container) bool {
				return dbc.Minion == myIP
			})

			dkcs, badDcks := filterOnSubnet(subnet, dkcs)

			var changed []db.Container
			changed, toBoot, toKill = syncWorker(dbcs, dkcs, subnet)
			for _, dbc := range changed {
				view.Commit(dbc)
			}

			toKill = append(toKill, badDcks...)
			return nil
		})

		doContainers(dk, toBoot, dockerRun)
		doContainers(dk, toKill, dockerKill)
	}
}
Example #29
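// runNetworkMaster writes the database's containers and labels into the cluster store
// when this minion is the leader.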
func runNetworkMaster(conn db.Conn, store Store) {
	for range wakeChan(conn, store) {
		leader := false
		var containers []db.Container
		conn.Transact(func(view db.Database) error {
			leader = view.EtcdLeader()
			containers = view.SelectFromContainer(nil)
			return nil
		})

		if !leader {
			continue
		}

		if err := writeStoreContainers(store, containers); err != nil {
			log.WithError(err).Warning("Failed to update containers in ETCD")
		}

		writeStoreLabels(store, containers)
	}
}
Example #30
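// checkEtcdEquals polls for up to a second, waiting for the etcd row to equal exp.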
func checkEtcdEquals(t *testing.T, conn db.Conn, exp db.Etcd) {
	timeout := time.After(1 * time.Second)
	var actual db.Etcd
	for {
		conn.Transact(func(view db.Database) error {
			actual, _ = view.GetEtcd()
			return nil
		})
		actual.ID = 0
		if reflect.DeepEqual(exp, actual) {
			return
		}
		select {
		case <-timeout:
			t.Errorf("Expected etcd row to be %v, but got %v\n", exp, actual)
			return
		default:
			time.Sleep(100 * time.Millisecond)
		}
	}
}