Example #1
File: network.go Project: yuenmeiwan/quilt
func runNetworkWorker(conn db.Conn, store Store) {
	// If the directories don't exist, create them so we may watch them.  If they
	// already exist, these calls will return an error that we don't log, but
	// that's OK because the loop will error out too.
	store.Mkdir(labelDir)
	store.Mkdir(containerDir)

	for range wakeChan(conn, store) {
		labelDir, err := getDirectory(store, labelDir)
		containerDir, err2 := getDirectory(store, containerDir)
		if err2 != nil {
			err = err2
		}

		if err != nil {
			log.WithError(err).Warn("Failed to read from cluster store.")
			continue
		}

		conn.Transact(func(view db.Database) error {
			readContainerTransact(view, containerDir)
			readLabelTransact(view, labelDir)
			return nil
		})
	}
}
Example #2
File: engine_test.go Project: NetSys/quilt
func testContainerTxn(t *testing.T, conn db.Conn, spec string) {
	compiled, err := stitch.FromJavascript(spec, stitch.DefaultImportGetter)
	assert.Nil(t, err)

	var containers []db.Container
	conn.Transact(func(view db.Database) error {
		updatePolicy(view, db.Master, compiled.String())
		containers = view.SelectFromContainer(nil)
		return nil
	})

	for _, e := range queryContainers(compiled) {
		found := false
		for i, c := range containers {
			if e.Image == c.Image &&
				reflect.DeepEqual(e.Command, c.Command) &&
				util.EditDistance(c.Labels, e.Labels) == 0 {
				containers = append(containers[:i], containers[i+1:]...)
				found = true
				break
			}
		}

		assert.True(t, found)
	}

	assert.Empty(t, containers)
}
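The remove-on-match loop above is a multiset-equality check: each expected container must match exactly one row, and no rows may be left over.  The same idea as a small generic helper (a sketch, Go 1.18+; matchesMultiset is a hypothetical name, not part of quilt):

func matchesMultiset[E, A any](exp []E, act []A, eq func(E, A) bool) bool {
	for _, e := range exp {
		found := false
		for i, a := range act {
			if eq(e, a) {
				// Consume the match so it can't be counted twice.
				act = append(act[:i], act[i+1:]...)
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return len(act) == 0 // leftovers mean the multisets differ
}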
Example #3
File: engine_test.go Project: NetSys/quilt
func selectACL(conn db.Conn) (acl db.ACL, err error) {
	err = conn.Transact(func(view db.Database) error {
		acl, err = view.GetACL()
		return err
	})
	return
}
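This example shows the standard pattern for reading results out of a Transact closure: assign to named return values and let Transact propagate the closure's error.  A hypothetical caller (assuming the logrus-style logger used throughout these examples):

	if acl, err := selectACL(conn); err == nil {
		log.Infof("Current ACL: %v", acl)
	}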
Example #4
File: foreman.go Project: NetSys/quilt
// Init initializes the foreman the first time it operates on a new namespace.  It queries the currently
// running VMs for their previously assigned roles, and writes them to the database.
func Init(conn db.Conn) {
	for _, m := range minions {
		m.client.Close()
	}
	minions = map[string]*minion{}

	conn.Transact(func(view db.Database) error {
		machines := view.SelectFromMachine(func(m db.Machine) bool {
			return m.PublicIP != "" && m.PrivateIP != "" && m.CloudID != ""
		})

		updateMinionMap(machines)

		forEachMinion(func(m *minion) {
			var err error
			m.config, err = m.client.getMinion()
			m.connected = err == nil
		})

		for _, m := range minions {
			role := db.PBToRole(m.config.Role)
			if m.connected && role != db.None {
				m.machine.Role = role
				m.machine.Connected = m.connected
				view.Commit(m.machine)
			}
		}

		return nil
	})
}
Example #5
File: elector.go Project: yuenmeiwan/quilt
func watchLeader(conn db.Conn, store Store) {
	tickRate := electionTTL
	if tickRate > 30 {
		tickRate = 30
	}

	watch := store.Watch(leaderKey, 1*time.Second)
	trigg := conn.TriggerTick(tickRate, db.EtcdTable)
	for {
		leader, _ := store.Get(leaderKey)
		conn.Transact(func(view db.Database) error {
			etcdRows := view.SelectFromEtcd(nil)
			if len(etcdRows) == 1 {
				etcdRows[0].LeaderIP = leader
				view.Commit(etcdRows[0])
			}
			return nil
		})

		select {
		case <-watch:
		case <-trigg.C:
		}
	}
}
Example #6
File: engine_test.go Project: NetSys/quilt
func testConnectionTxn(t *testing.T, conn db.Conn, spec string) {
	compiled, err := stitch.FromJavascript(spec, stitch.DefaultImportGetter)
	assert.Nil(t, err)

	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		updatePolicy(view, db.Master, compiled.String())
		connections = view.SelectFromConnection(nil)
		return nil
	})

	exp := compiled.Connections
	for _, e := range exp {
		found := false
		for i, c := range connections {
			if e.From == c.From && e.To == c.To && e.MinPort == c.MinPort &&
				e.MaxPort == c.MaxPort {
				connections = append(
					connections[:i], connections[i+1:]...)
				found = true
				break
			}
		}

		assert.True(t, found)
	}

	assert.Empty(t, connections)
}
Example #7
File: master.go Project: NetSys/quilt
func runMaster(conn db.Conn) {
	conn.Transact(func(view db.Database) error {
		if view.EtcdLeader() {
			placeContainers(view)
		}
		return nil
	})
}
Example #8
File: network.go Project: NetSys/quilt
func runNetwork(conn db.Conn, store Store) {
	for range wakeChan(conn, store) {
		// If the etcd read failed, we only want to update the db if it
		// failed because a key was missing (has not been created yet).
		// In all other cases, we skip this iteration.
		etcdData, err := readEtcd(store)
		if err != nil {
			etcdErr, ok := err.(client.Error)
			if !ok || etcdErr.Code != client.ErrorCodeKeyNotFound {
				log.WithError(err).Error("Etcd transaction failed.")
				continue
			}
			log.WithError(err).Debug()
		}

		leader := false
		var containers []db.Container
		conn.Transact(func(view db.Database) error {
			leader = view.EtcdLeader()
			containers = view.SelectFromContainer(func(c db.Container) bool {
				return c.Minion != ""
			})

			minion, err := view.MinionSelf()
			if err == nil && minion.Role == db.Worker {
				updateWorker(view, minion, store, etcdData)
			}

			ipMap, err := loadMinionIPs(store)
			if err != nil {
				log.WithError(err).Error("Etcd read minion IPs failed")
				return nil
			}

			// It would likely be more efficient to perform the etcd write
			// outside of the DB transact. But, if we perform the writes
			// after the transact, there is no way to ensure that the writes
			// were successful before updating the DB with the information
			// produced by the updateEtcd* functions (not considering the
			// etcd writes they perform).
			if leader {
				etcdData, err = updateEtcd(store, etcdData, containers)
				if err != nil {
					log.WithError(err).Error("Etcd update failed.")
					return nil
				}

				updateLeaderDBC(view, containers, etcdData, ipMap)
			}

			updateDBLabels(view, etcdData, ipMap)
			return nil
		})
	}
}
Example #9
File: engine.go Project: yuenmeiwan/quilt
// UpdatePolicy executes transactions on 'conn' to make it reflect a new policy,
// 'stitch'.
func UpdatePolicy(conn db.Conn, stitch stitch.Stitch) error {
	txn := func(db db.Database) error {
		return updateTxn(db, stitch)
	}

	if err := conn.Transact(txn); err != nil {
		return err
	}

	return nil
}
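Because the wrapper only forwards the transaction's error, it can be written more compactly with identical behavior:

func UpdatePolicy(conn db.Conn, stitch stitch.Stitch) error {
	return conn.Transact(func(db db.Database) error {
		return updateTxn(db, stitch)
	})
}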
Example #10
func setNamespace(conn db.Conn, ns string) {
	conn.Transact(func(view db.Database) error {
		clst, err := view.GetCluster()
		if err != nil {
			clst = view.InsertCluster()
		}

		clst.Namespace = ns
		view.Commit(clst)
		return nil
	})
}
Example #11
File: engine_test.go Project: NetSys/quilt
func updateStitch(t *testing.T, conn db.Conn, stitch stitch.Stitch) {
	conn.Transact(func(view db.Database) error {
		cluster, err := view.GetCluster()
		if err != nil {
			cluster = view.InsertCluster()
		}
		cluster.Spec = stitch.String()
		view.Commit(cluster)
		return nil
	})
	assert.Nil(t, conn.Transact(updateTxn))
}
Example #12
File: engine_test.go Project: NetSys/quilt
func selectMachines(conn db.Conn) (masters, workers []db.Machine) {
	conn.Transact(func(view db.Database) error {
		masters = view.SelectFromMachine(func(m db.Machine) bool {
			return m.Role == db.Master
		})
		workers = view.SelectFromMachine(func(m db.Machine) bool {
			return m.Role == db.Worker
		})
		return nil
	})
	return
}
Example #13
// Run blocks implementing the scheduler module.
func Run(conn db.Conn) {
	var sched scheduler
	for range conn.TriggerTick(30, db.MinionTable, db.EtcdTable, db.ContainerTable,
		db.PlacementTable).C {
		minion, err := conn.MinionSelf()
		if err != nil || !conn.EtcdLeader() || minion.Role != db.Master ||
			minion.PrivateIP == "" {
			sched = nil
			continue
		}

		if sched == nil {
			ip := minion.PrivateIP
			sched = newSwarm(docker.New(fmt.Sprintf("tcp://%s:2377", ip)))
			time.Sleep(60 * time.Second)
		}

		placements := conn.SelectFromPlacement(nil)
		connections := conn.SelectFromConnection(nil)
		// Each time we run through this loop, we may boot or terminate
		// containers.  These modifications should, in turn, be reflected in
		// the database.  For this reason, we attempt to sync until no
		// database modifications happen (up to an arbitrary limit of three
		// tries).
		for i := 0; i < 3; i++ {
			dkc, err := sched.list()
			if err != nil {
				log.WithError(err).Warning("Failed to get containers.")
				break
			}

			var boot []db.Container
			var term []string
			conn.Transact(func(view db.Database) error {
				term, boot = syncDB(view, dkc)
				return nil
			})

			if len(term) == 0 && len(boot) == 0 {
				break
			}
			sched.terminate(term)
			sched.boot(boot, placements, connections)
		}
	}
}
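The inner loop is a converge-until-stable pattern: keep syncing while each pass still changes the database, up to a fixed bound.  Stripped of the scheduling details, the shape is (syncOnce and maxTries are hypothetical names):

	const maxTries = 3 // arbitrary bound, mirroring the three tries above
	for i := 0; i < maxTries; i++ {
		if changed := syncOnce(); !changed { // hypothetical single sync pass
			break
		}
	}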
Example #14
File: worker.go Project: yuenmeiwan/quilt
func runWorker(conn db.Conn, dk docker.Client) {
	minion, err := conn.MinionSelf()
	if err != nil || minion.Role != db.Worker {
		return
	}

	odb, err := ovsdb.Open()
	if err != nil {
		log.WithError(err).Warning("Failed to connect to ovsdb-server.")
		return
	}
	defer odb.Close()

	var labels []db.Label
	var containers []db.Container
	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		containers = view.SelectFromContainer(func(c db.Container) bool {
			return c.DockerID != "" && c.IP != "" && c.Mac != ""
		})
		labels = view.SelectFromLabel(func(l db.Label) bool {
			return l.IP != ""
		})
		connections = view.SelectFromConnection(nil)
		return nil
	})

	updateNamespaces(containers)
	updateVeths(containers)
	updateNAT(containers, connections)
	updatePorts(odb, containers)

	if exists, err := linkExists("", quiltBridge); exists {
		updateDefaultGw(odb)
		updateOpenFlow(dk, odb, containers, labels, connections)
	} else if err != nil {
		log.WithError(err).Error("failed to check if link exists")
	}
	updateNameservers(dk, containers)
	updateContainerIPs(containers, labels)
	updateRoutes(containers)
	updateEtcHosts(dk, containers, labels, connections)
	updateLoopback(containers)
}
Example #15
File: elector.go Project: yuenmeiwan/quilt
func commitLeader(conn db.Conn, leader bool, ip ...string) {
	if len(ip) > 1 {
		panic("Not Reached")
	}

	conn.Transact(func(view db.Database) error {
		etcdRows := view.SelectFromEtcd(nil)
		if len(etcdRows) == 1 {
			etcdRows[0].Leader = leader

			if len(ip) == 1 {
				etcdRows[0].LeaderIP = ip[0]
			}

			view.Commit(etcdRows[0])
		}
		return nil
	})
}
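The variadic ip parameter emulates an optional argument, and the panic at the top enforces at most one value.  Sketched calls based on the signature above:

	commitLeader(conn, true, "10.0.0.1") // become leader and record the IP
	commitLeader(conn, false)            // drop leadership; LeaderIP unchanged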
Example #16
File: cluster.go Project: yuenmeiwan/quilt
// Run continually checks 'conn' for cluster changes and recreates the cluster as
// needed.
func Run(conn db.Conn) {
	var clst *cluster
	for range conn.TriggerTick(60, db.ClusterTable).C {
		var dbCluster db.Cluster
		err := conn.Transact(func(db db.Database) error {
			var err error
			dbCluster, err = db.GetCluster()
			return err
		})

		// clst must be nil-checked before dereferencing it, or the first
		// iteration would panic.
		if err == nil && (clst == nil || clst.namespace != dbCluster.Namespace) {
			if clst != nil {
				clst.fm.stop()
				clst.trigger.Stop()
			}
			clst = newCluster(conn, dbCluster.Namespace)
			go clst.listen()
		}
	}
}
Example #17
File: worker.go Project: NetSys/quilt
func runWorker(conn db.Conn, dk docker.Client, myIP string, subnet net.IPNet) {
	if myIP == "" {
		return
	}

	filter := map[string][]string{"label": {labelPair}}

	var toBoot, toKill []interface{}
	for i := 0; i < 2; i++ {
		dkcs, err := dk.List(filter)
		if err != nil {
			log.WithError(err).Warning("Failed to list docker containers.")
			return
		}

		conn.Transact(func(view db.Database) error {
			_, err := view.MinionSelf()
			if err != nil {
				return nil
			}

			dbcs := view.SelectFromContainer(func(dbc db.Container) bool {
				return dbc.Minion == myIP
			})

			dkcs, badDcks := filterOnSubnet(subnet, dkcs)

			var changed []db.Container
			changed, toBoot, toKill = syncWorker(dbcs, dkcs, subnet)
			for _, dbc := range changed {
				view.Commit(dbc)
			}

			toKill = append(toKill, badDcks...)
			return nil
		})

		doContainers(dk, toBoot, dockerRun)
		doContainers(dk, toKill, dockerKill)
	}
}
Example #18
File: network.go Project: yuenmeiwan/quilt
func runNetworkMaster(conn db.Conn, store Store) {
	for range wakeChan(conn, store) {
		leader := false
		var containers []db.Container
		conn.Transact(func(view db.Database) error {
			leader = view.EtcdLeader()
			containers = view.SelectFromContainer(nil)
			return nil
		})

		if !leader {
			continue
		}

		if err := writeStoreContainers(store, containers); err != nil {
			log.WithError(err).Warning("Failed to update containers in ETCD")
		}

		writeStoreLabels(store, containers)
	}
}
Example #19
File: server_test.go Project: NetSys/quilt
func checkEtcdEquals(t *testing.T, conn db.Conn, exp db.Etcd) {
	timeout := time.After(1 * time.Second)
	var actual db.Etcd
	for {
		conn.Transact(func(view db.Database) error {
			actual, _ = view.GetEtcd()
			return nil
		})
		actual.ID = 0
		if reflect.DeepEqual(exp, actual) {
			return
		}
		select {
		case <-timeout:
			t.Errorf("Expected etcd row to be %v, but got %v\n", exp, actual)
			return
		default:
			time.Sleep(100 * time.Millisecond)
		}
	}
}
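The helper polls for up to a second because the etcd row is written asynchronously, and it zeroes actual.ID so the expected row may leave the auto-assigned ID unset.  A hypothetical assertion using the fields seen in the elector examples:

	checkEtcdEquals(t, conn, db.Etcd{Leader: true, LeaderIP: "10.0.0.1"})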
Example #20
func testContainerTxn(conn db.Conn, spec string) string {
	var containers []db.Container
	conn.Transact(func(view db.Database) error {
		updatePolicy(view, db.Master, spec)
		containers = view.SelectFromContainer(nil)
		return nil
	})

	var sc scanner.Scanner
	compiled, err := stitch.New(*sc.Init(strings.NewReader(spec)), "", false)
	if err != nil {
		return err.Error()
	}

	for _, e := range queryContainers(compiled) {
		found := false
		for i, c := range containers {
			if e.Image == c.Image &&
				reflect.DeepEqual(e.Command, c.Command) &&
				util.EditDistance(c.Labels, e.Labels) == 0 {
				containers = append(containers[:i], containers[i+1:]...)
				found = true
				break
			}
		}

		if !found {
			return fmt.Sprintf("Missing expected label set: %v\n%v",
				e, containers)
		}
	}

	if len(containers) > 0 {
		return spew.Sprintf("Unexpected containers: %s", containers)
	}

	return ""
}
Example #21
func testConnectionTxn(conn db.Conn, spec string) string {
	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		updatePolicy(view, db.Master, spec)
		connections = view.SelectFromConnection(nil)
		return nil
	})

	var sc scanner.Scanner
	compiled, err := stitch.New(*sc.Init(strings.NewReader(spec)), "", false)
	if err != nil {
		return err.Error()
	}

	exp := compiled.QueryConnections()
	for _, e := range exp {
		found := false
		for i, c := range connections {
			if e.From == c.From && e.To == c.To && e.MinPort == c.MinPort &&
				e.MaxPort == c.MaxPort {
				connections = append(
					connections[:i], connections[i+1:]...)
				found = true
				break
			}
		}

		if !found {
			return fmt.Sprintf("Missing expected connection: %v", e)
		}
	}

	if len(connections) > 0 {
		return spew.Sprintf("Unexpected connections: %s", connections)
	}

	return ""
}
Example #22
File: network.go Project: NetSys/quilt
// The leader of the cluster is responsible for properly configuring OVN northd for
// container networking.  This simply means creating a logical port for each container
// and label.  The specialized OpenFlow rules Quilt requires are managed by the workers
// individually.
func runMaster(conn db.Conn) {
	var leader, init bool
	var labels []db.Label
	var containers []db.Container
	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		init = checkSupervisorInit(view)
		leader = view.EtcdLeader()

		labels = view.SelectFromLabel(func(label db.Label) bool {
			return label.IP != ""
		})

		containers = view.SelectFromContainer(func(dbc db.Container) bool {
			return dbc.Mac != "" && dbc.IP != ""
		})

		connections = view.SelectFromConnection(nil)
		return nil
	})

	if !init || !leader {
		return
	}

	var dbData []dbport
	for _, l := range labels {
		if l.MultiHost {
			dbData = append(dbData, dbport{
				bridge: lSwitch,
				ip:     l.IP,
				mac:    labelMac,
			})
		}
	}
	for _, c := range containers {
		dbData = append(dbData, dbport{bridge: lSwitch, ip: c.IP, mac: c.Mac})
	}

	ovsdbClient, err := ovsdb.Open()
	if err != nil {
		log.WithError(err).Error("Failed to connect to OVSDB.")
		return
	}
	defer ovsdbClient.Close()

	ovsdbClient.CreateLogicalSwitch(lSwitch)
	lports, err := ovsdbClient.ListLogicalPorts(lSwitch)
	if err != nil {
		log.WithError(err).Error("Failed to list OVN ports.")
		return
	}

	portKey := func(val interface{}) interface{} {
		port := val.(ovsdb.LPort)
		return fmt.Sprintf("bridge:%s\nname:%s", port.Bridge, port.Name)
	}

	dbKey := func(val interface{}) interface{} {
		dbPort := val.(dbport)
		return fmt.Sprintf("bridge:%s\nname:%s", dbPort.bridge, dbPort.ip)
	}

	_, ovsps, dbps := join.HashJoin(ovsdb.LPortSlice(lports), dbslice(dbData),
		portKey, dbKey)
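	// The unmatched results drive the sync below: ovsps are stale OVN ports to
	// delete, and dbps are desired ports not yet in OVN to create.  Matched
	// pairs are already consistent and need no work.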

	for _, dbp := range dbps {
		lport := dbp.(dbport)
		log.WithField("IP", lport.ip).Info("New logical port.")
		err := ovsdbClient.CreateLogicalPort(lport.bridge, lport.ip, lport.mac,
			lport.ip)
		if err != nil {
			log.WithError(err).Warnf("Failed to create port %s.", lport.ip)
		}
	}

	for _, ovsp := range ovsps {
		lport := ovsp.(ovsdb.LPort)
		log.Infof("Delete logical port %s.", lport.Name)
		if err := ovsdbClient.DeleteLogicalPort(lSwitch, lport); err != nil {
			log.WithError(err).Warn("Failed to delete logical port.")
		}
	}

	updateACLs(ovsdbClient, connections, labels)
}
Example #23
File: foreman.go Project: NetSys/quilt
// RunOnce should be called regularly to allow the foreman to update minion roles.
func RunOnce(conn db.Conn) {
	var spec string
	var machines []db.Machine
	conn.Transact(func(view db.Database) error {
		machines = view.SelectFromMachine(func(m db.Machine) bool {
			return m.PublicIP != "" && m.PrivateIP != "" && m.CloudID != ""
		})

		clst, _ := view.GetCluster()
		spec = clst.Spec

		return nil
	})

	updateMinionMap(machines)

	/* Request the current configuration from each minion. */
	forEachMinion(func(m *minion) {
		var err error
		m.config, err = m.client.getMinion()

		connected := err == nil
		if connected && !m.connected {
			log.WithField("machine", m.machine).Debug("New connection.")
		}

		if connected != m.machine.Connected {
			conn.Transact(func(view db.Database) error {
				m.machine.Connected = connected
				view.Commit(m.machine)
				return nil
			})
		}

		m.connected = connected
	})

	var etcdIPs []string
	for _, m := range minions {
		if m.machine.Role == db.Master && m.machine.PrivateIP != "" {
			etcdIPs = append(etcdIPs, m.machine.PrivateIP)
		}
	}

	// Assign all of the minions their new configs
	forEachMinion(func(m *minion) {
		if !m.connected {
			return
		}

		newConfig := pb.MinionConfig{
			Role:           db.RoleToPB(m.machine.Role),
			PrivateIP:      m.machine.PrivateIP,
			Spec:           spec,
			Provider:       string(m.machine.Provider),
			Size:           m.machine.Size,
			Region:         m.machine.Region,
			EtcdMembers:    etcdIPs,
			AuthorizedKeys: m.machine.SSHKeys,
		}

		if reflect.DeepEqual(newConfig, m.config) {
			return
		}

		if err := m.client.setMinion(newConfig); err != nil {
			log.WithError(err).Error("Failed to set minion config.")
			return
		}
	})
}
Example #24
File: network.go Project: yuenmeiwan/quilt
// The leader of the cluster is responsible for properly configuring OVN northd for
// container networking.  This simply means creating a logical port for each container
// and label.  The specialized OpenFlow rules Quilt requires are managed by the workers
// individually.
func runMaster(conn db.Conn) {
	var leader bool
	var labels []db.Label
	var containers []db.Container
	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		leader = view.EtcdLeader()

		labels = view.SelectFromLabel(func(label db.Label) bool {
			return label.IP != ""
		})

		containers = view.SelectFromContainer(func(dbc db.Container) bool {
			return dbc.DockerID != "" && dbc.Mac != "" && dbc.IP != ""
		})

		connections = view.SelectFromConnection(nil)
		return nil
	})

	if !leader {
		return
	}

	ovsdb, err := ovsdb.Open()
	if err != nil {
		log.WithError(err).Error("Failed to connect to OVSDB.")
		return
	}
	defer ovsdb.Close()

	ovsdb.CreateSwitch(lSwitch)
	lportSlice, err := ovsdb.ListPorts(lSwitch)
	if err != nil {
		log.WithError(err).Error("Failed to list OVN ports.")
		return
	}

	// The garbageMap starts off containing every logical port in OVN.  As we find
	// that these ports are still useful, they're deleted from garbageMap until
	// only leftover garbage ports remain.  These are then deleted.
	garbageMap := make(map[string]struct{})
	for _, lport := range lportSlice {
		garbageMap[lport.Name] = struct{}{}
	}

	for _, dbl := range labels {
		if !dbl.MultiHost {
			continue
		}

		if _, ok := garbageMap[dbl.Label]; ok {
			delete(garbageMap, dbl.Label)
			continue
		}

		log.WithFields(log.Fields{
			"name": dbl.Label,
			"IP":   dbl.IP,
		}).Info("New logical port.")
		err := ovsdb.CreatePort(lSwitch, dbl.Label, labelMac, dbl.IP)
		if err != nil {
			log.WithError(err).Warnf("Failed to create port %s.", dbl.Label)
		}
	}

	for _, dbc := range containers {
		if _, ok := garbageMap[dbc.DockerID]; ok {
			delete(garbageMap, dbc.DockerID)
			continue
		}

		log.WithFields(log.Fields{
			"name": util.ShortUUID(dbc.DockerID),
			"IP":   dbc.IP,
		}).Info("New logical port.")
		err := ovsdb.CreatePort("quilt", dbc.DockerID, dbc.Mac, dbc.IP)
		if err != nil {
			log.WithFields(log.Fields{
				"error": err,
				"name":  dbc.DockerID,
			}).Warn("Failed to create port.")
		}
	}

	// Ports still in the map have no corresponding label or container; otherwise
	// they would have been removed from the map in the preceding loops.
	for lport := range garbageMap {
		log.Infof("Delete logical port %s.", lport)
		if err := ovsdb.DeletePort(lSwitch, lport); err != nil {
			log.WithError(err).Warn("Failed to delete logical port.")
		}
	}

	updateACLs(connections, labels, containers)
}
Example #25
File: engine.go Project: NetSys/quilt
// Run updates the database in response to stitch changes in the cluster table.
func Run(conn db.Conn) {
	for range conn.TriggerTick(30, db.ClusterTable, db.MachineTable, db.ACLTable).C {
		conn.Transact(updateTxn)
	}
}