Code example #1
File: network.go Project: yuenmeiwan/quilt
// Run blocks implementing the network services.
func Run(conn db.Conn, dk docker.Client) {
	for {
		odb, err := ovsdb.Open()
		if err == nil {
			odb.Close()
			break
		}
		log.WithError(err).Debug("Could not connect to ovsdb-server.")
		time.Sleep(5 * time.Second)
	}

	for range conn.TriggerTick(30, db.MinionTable, db.ContainerTable,
		db.ConnectionTable, db.LabelTable, db.EtcdTable).C {
		runWorker(conn, dk)
		runMaster(conn)
	}
}
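
Run's structure is worth calling out: it blocks until ovsdb-server answers, then drives both roles from a single trigger loop. A minimal sketch of that shape, assuming hypothetical ready() and reconcile() helpers (TriggerTick also fires on table changes; a plain ticker only models the 30-second timeout):

package main

import (
	"log"
	"time"
)

// ready is a hypothetical probe; the real code dials ovsdb-server instead.
func ready() error { return nil }

// reconcile stands in for the runWorker/runMaster pair.
func reconcile() { log.Println("reconcile") }

func main() {
	// Block until the dependency answers, retrying every five seconds.
	for {
		if err := ready(); err == nil {
			break
		}
		time.Sleep(5 * time.Second)
	}

	// Then reconcile on a fixed tick.
	for range time.Tick(30 * time.Second) {
		reconcile()
	}
}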
Code example #2
File: worker.go Project: yuenmeiwan/quilt
func runWorker(conn db.Conn, dk docker.Client) {
	minion, err := conn.MinionSelf()
	if err != nil || minion.Role != db.Worker {
		return
	}

	odb, err := ovsdb.Open()
	if err != nil {
		log.WithError(err).Warning("Failed to connect to ovsdb-server.")
		return
	}
	defer odb.Close()

	var labels []db.Label
	var containers []db.Container
	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		containers = view.SelectFromContainer(func(c db.Container) bool {
			return c.DockerID != "" && c.IP != "" && c.Mac != ""
		})
		labels = view.SelectFromLabel(func(l db.Label) bool {
			return l.IP != ""
		})
		connections = view.SelectFromConnection(nil)
		return nil
	})

	updateNamespaces(containers)
	updateVeths(containers)
	updateNAT(containers, connections)
	updatePorts(odb, containers)

	// Only set up the default gateway and OpenFlow rules once the quilt
	// bridge link exists.  Note that `exists` is checked first: the error is
	// only reported when the link is also missing.
	if exists, err := linkExists("", quiltBridge); exists {
		updateDefaultGw(odb)
		updateOpenFlow(dk, odb, containers, labels, connections)
	} else if err != nil {
		log.WithError(err).Error("failed to check if link exists")
	}
	updateNameservers(dk, containers)
	updateContainerIPs(containers, labels)
	updateRoutes(containers)
	updateEtcHosts(dk, containers, labels, connections)
	updateLoopback(containers)
}
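
The worker reads labels, containers, and connections inside a single Transact call so the three slices form one consistent snapshot, and performs all side effects outside the transaction. A minimal sketch of that pattern, using a hypothetical mutex-guarded store in place of db.Conn:

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu         sync.Mutex
	containers []string
	labels     []string
}

// transact runs f while holding the lock, so all reads inside f observe
// the same consistent state.
func (s *store) transact(f func()) {
	s.mu.Lock()
	defer s.mu.Unlock()
	f()
}

func main() {
	s := &store{containers: []string{"c1"}, labels: []string{"web"}}

	var containers, labels []string
	s.transact(func() {
		containers = append([]string(nil), s.containers...)
		labels = append([]string(nil), s.labels...)
	})

	// Side effects happen outside the critical section, on the snapshot.
	fmt.Println(containers, labels)
}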
Code example #3
File: network.go Project: yuenmeiwan/quilt
// The leader of the cluster is responsible for properly configuring OVN northd for
// container networking.  This simply means creating a logical port for each container
// and label.  The specialized OpenFlow rules Quilt requires are managed by the workers
// individually.
func runMaster(conn db.Conn) {
	var leader bool
	var labels []db.Label
	var containers []db.Container
	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		leader = view.EtcdLeader()

		labels = view.SelectFromLabel(func(label db.Label) bool {
			return label.IP != ""
		})

		containers = view.SelectFromContainer(func(dbc db.Container) bool {
			return dbc.DockerID != "" && dbc.Mac != "" && dbc.IP != ""
		})

		connections = view.SelectFromConnection(nil)
		return nil
	})

	if !leader {
		return
	}

	ovsdb, err := ovsdb.Open()
	if err != nil {
		log.WithError(err).Error("Failed to connect to OVSDB.")
		return
	}
	defer ovsdb.Close()

	ovsdb.CreateSwitch(lSwitch)
	lportSlice, err := ovsdb.ListPorts(lSwitch)
	if err != nil {
		log.WithError(err).Error("Failed to list OVN ports.")
		return
	}

	// The garbageMap starts off containing every logical port in OVN.  As we
	// find that these ports are still useful, they're deleted from garbageMap
	// until only leftover garbage ports remain.  These are then deleted.
	garbageMap := make(map[string]struct{})
	for _, lport := range lportSlice {
		garbageMap[lport.Name] = struct{}{}
	}

	for _, dbl := range labels {
		if !dbl.MultiHost {
			continue
		}

		if _, ok := garbageMap[dbl.Label]; ok {
			delete(garbageMap, dbl.Label)
			continue
		}

		log.WithFields(log.Fields{
			"name": dbl.Label,
			"IP":   dbl.IP,
		}).Info("New logical port.")
		err := ovsdb.CreatePort(lSwitch, dbl.Label, labelMac, dbl.IP)
		if err != nil {
			log.WithError(err).Warnf("Failed to create port %s.", dbl.Label)
		}
	}

	for _, dbc := range containers {
		if _, ok := garbageMap[dbc.DockerID]; ok {
			delete(garbageMap, dbc.DockerID)
			continue
		}

		log.WithFields(log.Fields{
			"name": util.ShortUUID(dbc.DockerID),
			"IP":   dbc.IP,
		}).Info("New logical port.")
		err := ovsdb.CreatePort("quilt", dbc.DockerID, dbc.Mac, dbc.IP)
		if err != nil {
			log.WithFields(log.Fields{
				"error": err,
				"name":  dbc.DockerID,
			}).Warn("Failed to create port.")
		}
	}

	// Ports still in the map don't have a corresponding label or container;
	// otherwise they would have been deleted in the preceding loops.
	for lport := range garbageMap {
		log.Infof("Delete logical port %s.", lport)
		if err := ovsdb.DeletePort(lSwitch, lport); err != nil {
			log.WithError(err).Warn("Failed to delete logical port.")
		}
	}

	updateACLs(connections, labels, containers)
}
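
The garbageMap bookkeeping in runMaster is a small mark-and-sweep reconciliation: seed a set with everything that currently exists, unmark whatever is still wanted, create what is missing, and delete the leftovers. A self-contained sketch of the pattern (the names are illustrative, not quilt's API):

package main

import "fmt"

// reconcile returns what must be created and what must be removed to turn
// the existing set into the desired set.
func reconcile(existing, desired []string) (create, remove []string) {
	garbage := make(map[string]struct{})
	for _, e := range existing {
		garbage[e] = struct{}{}
	}
	for _, d := range desired {
		if _, ok := garbage[d]; ok {
			delete(garbage, d) // still wanted: not garbage
			continue
		}
		create = append(create, d) // desired but absent: create it
	}
	for g := range garbage {
		remove = append(remove, g) // existing but unwanted: delete it
	}
	return create, remove
}

func main() {
	create, remove := reconcile([]string{"a", "b"}, []string{"b", "c"})
	fmt.Println(create, remove) // [c] [a]
}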
Code example #4
File: network.go Project: yuenmeiwan/quilt
func updateACLs(connections []db.Connection, labels []db.Label,
	containers []db.Container) {
	// Get the ACLs currently stored in the database.
	ovsdbClient, err := ovsdb.Open()
	if err != nil {
		log.WithError(err).Error("Failed to connect to OVSDB.")
		return
	}
	defer ovsdbClient.Close()

	ovsdbACLs, err := ovsdbClient.ListACLs(lSwitch)
	if err != nil {
		log.WithError(err).Error("Failed to list ACLs.")
		return
	}

	// Generate the ACLs that should be in the database.
	labelIPMap := map[string]string{}
	for _, l := range labels {
		labelIPMap[l.Label] = l.IP
	}

	labelDbcMap := map[string][]db.Container{}
	for _, dbc := range containers {
		for _, l := range dbc.Labels {
			labelDbcMap[l] = append(labelDbcMap[l], dbc)
		}
	}

	matchSet := map[string]struct{}{}
	for _, conn := range connections {
		for _, fromDbc := range labelDbcMap[conn.From] {
			fromIP := fromDbc.IP
			toIP := labelIPMap[conn.To]
			if fromIP == "" || toIP == "" {
				continue
			}

			min := conn.MinPort
			max := conn.MaxPort

			match := fmt.Sprintf("ip4.src==%s && ip4.dst==%s && "+
				"(%d <= udp.dst <= %d || %[3]d <= tcp.dst <= %[4]d)",
				fromIP, toIP, min, max)
			reverse := fmt.Sprintf("ip4.src==%s && ip4.dst==%s && "+
				"(%d <= udp.src <= %d || %[3]d <= tcp.src <= %[4]d)",
				toIP, fromIP, min, max)

			matchSet[match] = struct{}{}
			matchSet[reverse] = struct{}{}

			icmp := fmt.Sprintf("ip4.src==%s && ip4.dst==%s && icmp",
				fromIP, toIP)
			revIcmp := fmt.Sprintf("ip4.src==%s && ip4.dst==%s && icmp",
				toIP, fromIP)

			matchSet[icmp] = struct{}{}
			matchSet[revIcmp] = struct{}{}
		}
	}

	acls := make(map[ovsdb.AclCore]struct{})

	// Drop all ip traffic by default.
	new := ovsdb.AclCore{
		Priority:  0,
		Match:     "ip",
		Action:    "drop",
		Direction: "to-lport"}
	acls[new] = struct{}{}

	new = ovsdb.AclCore{
		Priority:  0,
		Match:     "ip",
		Action:    "drop",
		Direction: "from-lport"}
	acls[new] = struct{}{}

	for match := range matchSet {
		new = ovsdb.AclCore{
			Priority:  1,
			Direction: "to-lport",
			Action:    "allow",
			Match:     match}
		acls[new] = struct{}{}

		new = ovsdb.AclCore{
			Priority:  1,
			Direction: "from-lport",
			Action:    "allow",
			Match:     match}
		acls[new] = struct{}{}
	}

	for _, acl := range ovsdbACLs {
		core := acl.Core
		if _, ok := acls[core]; ok {
			delete(acls, core)
			continue
		}

		err := ovsdbClient.DeleteACL(lSwitch, core.Direction, core.Priority,
			core.Match)
		if err != nil {
			log.WithError(err).Warn("Error deleting ACL")
		}
	}

	for acl := range acls {
		err := ovsdbClient.CreateACL(lSwitch, acl.Direction, acl.Priority,
			acl.Match, acl.Action, false)
		if err != nil {
			log.WithError(err).Warn("Error adding ACL")
		}
	}
}
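
To make the generated OVN matches concrete, this sketch evaluates the same Sprintf formats for one made-up connection (the IPs and port range are invented). Note how %[3]d and %[4]d reuse the third and fourth arguments, so min and max appear in both the udp and tcp clauses:

package main

import "fmt"

func main() {
	fromIP, toIP := "10.0.0.2", "10.0.0.3"
	min, max := 80, 80

	match := fmt.Sprintf("ip4.src==%s && ip4.dst==%s && "+
		"(%d <= udp.dst <= %d || %[3]d <= tcp.dst <= %[4]d)",
		fromIP, toIP, min, max)
	reverse := fmt.Sprintf("ip4.src==%s && ip4.dst==%s && "+
		"(%d <= udp.src <= %d || %[3]d <= tcp.src <= %[4]d)",
		toIP, fromIP, min, max)

	fmt.Println(match)
	fmt.Println(reverse)
	// Output:
	// ip4.src==10.0.0.2 && ip4.dst==10.0.0.3 && (80 <= udp.dst <= 80 || 80 <= tcp.dst <= 80)
	// ip4.src==10.0.0.3 && ip4.dst==10.0.0.2 && (80 <= udp.src <= 80 || 80 <= tcp.src <= 80)
}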
Code example #5
File: network.go Project: NetSys/quilt
// The leader of the cluster is responsible for properly configuring OVN northd for
// container networking.  This simply means creating a logical port for each container
// and label.  The specialized OpenFlow rules Quilt requires are managed by the workers
// individually.
func runMaster(conn db.Conn) {
	var leader, init bool
	var labels []db.Label
	var containers []db.Container
	var connections []db.Connection
	conn.Transact(func(view db.Database) error {
		init = checkSupervisorInit(view)
		leader = view.EtcdLeader()

		labels = view.SelectFromLabel(func(label db.Label) bool {
			return label.IP != ""
		})

		containers = view.SelectFromContainer(func(dbc db.Container) bool {
			return dbc.Mac != "" && dbc.IP != ""
		})

		connections = view.SelectFromConnection(nil)
		return nil
	})

	if !init || !leader {
		return
	}

	var dbData []dbport
	for _, l := range labels {
		if l.MultiHost {
			dbData = append(dbData, dbport{
				bridge: lSwitch,
				ip:     l.IP,
				mac:    labelMac,
			})
		}
	}
	for _, c := range containers {
		dbData = append(dbData, dbport{bridge: lSwitch, ip: c.IP, mac: c.Mac})
	}

	ovsdbClient, err := ovsdb.Open()
	if err != nil {
		log.WithError(err).Error("Failed to connect to OVSDB.")
		return
	}
	defer ovsdbClient.Close()

	ovsdbClient.CreateLogicalSwitch(lSwitch)
	lports, err := ovsdbClient.ListLogicalPorts(lSwitch)
	if err != nil {
		log.WithError(err).Error("Failed to list OVN ports.")
		return
	}

	portKey := func(val interface{}) interface{} {
		port := val.(ovsdb.LPort)
		return fmt.Sprintf("bridge:%s\nname:%s", port.Bridge, port.Name)
	}

	dbKey := func(val interface{}) interface{} {
		dbPort := val.(dbport)
		return fmt.Sprintf("bridge:%s\nname:%s", dbPort.bridge, dbPort.ip)
	}

	_, ovsps, dbps := join.HashJoin(ovsdb.LPortSlice(lports), dbslice(dbData),
		portKey, dbKey)

	for _, dbp := range dbps {
		lport := dbp.(dbport)
		log.WithField("IP", lport.ip).Info("New logical port.")
		err := ovsdbClient.CreateLogicalPort(lport.bridge, lport.ip, lport.mac,
			lport.ip)
		if err != nil {
			log.WithError(err).Warnf("Failed to create port %s.", lport.ip)
		}
	}

	for _, ovsp := range ovsps {
		lport := ovsp.(ovsdb.LPort)
		log.Infof("Delete logical port %s.", lport.Name)
		if err := ovsdbClient.DeleteLogicalPort(lSwitch, lport); err != nil {
			log.WithError(err).Warn("Failed to delete logical port.")
		}
	}

	updateACLs(ovsdbClient, connections, labels)
}
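
Compared with example #3, this version delegates the set difference to join.HashJoin: both slices are bucketed by key, and the unmatched OVSDB ports (ovsps) are deleted while the unmatched database ports (dbps) are created. A simplified, hypothetical hash join over strings showing what that call computes (not quilt's actual join package):

package main

import "fmt"

// hashJoin pairs elements of left and right that share a key, and returns
// the unmatched leftovers from each side.
func hashJoin(left, right []string, key func(string) string) (
	pairs [][2]string, leftOnly, rightOnly []string) {

	index := make(map[string][]string)
	for _, l := range left {
		index[key(l)] = append(index[key(l)], l)
	}
	for _, r := range right {
		k := key(r)
		if ls := index[k]; len(ls) > 0 {
			pairs = append(pairs, [2]string{ls[0], r}) // matched on key
			index[k] = ls[1:]
			continue
		}
		rightOnly = append(rightOnly, r) // only on the right side
	}
	for _, ls := range index {
		leftOnly = append(leftOnly, ls...) // never matched: left side only
	}
	return pairs, leftOnly, rightOnly
}

func main() {
	id := func(s string) string { return s }
	pairs, l, r := hashJoin([]string{"a", "b"}, []string{"b", "c"}, id)
	fmt.Println(pairs, l, r) // [[b b]] [a] [c]
}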