Example #1
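// generateCurrentOpenFlow returns the OpenFlow rules currently installed on
// the quilt bridge, as reported by `ovs-ofctl dump-flows`.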
func generateCurrentOpenFlow(dk docker.Client) (OFRuleSlice, error) {
	args := "ovs-ofctl dump-flows " + quiltBridge
	stdout, _, err := dk.ExecVerbose(
		supervisor.Ovsvswitchd, strings.Split(args, " ")...)

	if err != nil {
		return nil, fmt.Errorf("failed to list OpenFlow flows: %s",
			string(stdout))
	}

	scanner := bufio.NewScanner(bytes.NewReader(stdout))
	var flows OFRuleSlice

	// The first line isn't a flow, so skip it.
	scanner.Scan()

	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())

		flow, err := makeOFRule(line)

		if err != nil {
			return nil, fmt.Errorf("failed to make OpenFlow rule: %s", err)
		}

		flows = append(flows, flow)
	}

	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("scanner error while getting OpenFlow flows: %s",
			err)
	}

	return flows, nil
}
Example #2
// Run blocks, implementing the scheduler module.
func Run(conn db.Conn, dk docker.Client) {
	bootWait(conn)

	subnet := getMinionSubnet(conn)
	err := dk.ConfigureNetwork(plugin.NetworkName, subnet)
	if err != nil {
		log.WithError(err).Fatal("Failed to configure network plugin")
	}

	loopLog := util.NewEventTimer("Scheduler")
	trig := conn.TriggerTick(60, db.MinionTable, db.ContainerTable,
		db.PlacementTable, db.EtcdTable).C
	for range trig {
		loopLog.LogStart()
		minion, err := conn.MinionSelf()
		if err != nil {
			log.WithError(err).Warn("Missing self in the minion table.")
			continue
		}

		if minion.Role == db.Worker {
			subnet = updateNetwork(conn, dk, subnet)
			runWorker(conn, dk, minion.PrivateIP, subnet)
		} else if minion.Role == db.Master {
			runMaster(conn)
		}
		loopLog.LogEnd()
	}
}
Example #3
// updateNameservers assigns each container the same nameservers as the host.
func updateNameservers(dk docker.Client, containers []db.Container) {
	hostResolv, err := ioutil.ReadFile("/etc/resolv.conf")
	if err != nil {
		log.WithError(err).Error("failed to read /etc/resolv.conf")
	}

	nsRE := regexp.MustCompile(`nameserver\s([0-9]{1,3}\.){3}[0-9]{1,3}\s+`)
	matches := nsRE.FindAllString(string(hostResolv), -1)
	newNameservers := strings.Join(matches, "\n")

	for _, dbc := range containers {
		id := dbc.DockerID

		currNameservers, err := dk.GetFromContainer(id, "/etc/resolv.conf")
		if err != nil {
			log.WithError(err).Error("failed to get /etc/resolv.conf")
			return
		}

		if newNameservers != currNameservers {
			err = dk.WriteToContainer(id, newNameservers, "/etc",
				"resolv.conf", 0644)
			if err != nil {
				log.WithError(err).Error(
					"failed to update /etc/resolv.conf")
			}
		}
	}
}
Example #4
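// deleteOFRule removes a single OpenFlow rule from the quilt bridge, matching
// the rule's table and match fields exactly (--strict).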
func deleteOFRule(dk docker.Client, flow OFRule) error {
	args := fmt.Sprintf("ovs-ofctl del-flows --strict %s %s,%s",
		quiltBridge, flow.table, flow.match)
	return dk.Exec(supervisor.Ovsvswitchd, strings.Split(args, " ")...)
}
Example #5
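// addOFRule installs a single OpenFlow rule on the quilt bridge.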
func addOFRule(dk docker.Client, flow OFRule) error {
	args := fmt.Sprintf("ovs-ofctl add-flow %s %s,%s,actions=%s",
		quiltBridge, flow.table, flow.match, flow.actions)
	return dk.Exec(supervisor.Ovsvswitchd, strings.Split(args, " ")...)
}
Example #6
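// dockerKill removes every docker.Container received on the in channel.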
func dockerKill(dk docker.Client, in chan interface{}) {
	for i := range in {
		dkc := i.(docker.Container)
		log.WithField("container", dkc.ID).Info("Remove container")
		if err := dk.RemoveID(dkc.ID); err != nil {
			log.WithFields(log.Fields{
				"error": err,
				"id":    dkc.ID,
			}).Warning("Failed to remove container.")
		}
	}
}
Example #7
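// updateNetwork reconfigures the network plugin if the minion's subnet has
// changed, and returns the subnet now in effect.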
func updateNetwork(conn db.Conn, dk docker.Client, subnet net.IPNet) net.IPNet {

	newSubnet := getMinionSubnet(conn)
	if subnet.String() == newSubnet.String() {
		return subnet
	}

	err := dk.ConfigureNetwork(plugin.NetworkName, newSubnet)
	if err != nil {
		log.WithError(err).Error("Failed to configure network plugin")
		return subnet
	}

	return newSubnet
}
Example #8
// delStopped deletes stopped containers.
//
// We do this because Docker Swarm accounts for stopped containers when
// applying its affinity filter, whereas our scheduler does not consider
// stopped containers in its placement decisions.
func delStopped(dk docker.Client) error {
	containers, err := dk.List(map[string][]string{"status": {"exited"}})
	if err != nil {
		return fmt.Errorf("error listing stopped containers: %s", err)
	}
	for _, dkc := range containers {
		// Stopped containers show up with a "/" in front of the name
		name := dkc.Name[1:]
		if err := dk.Remove(name); err != nil {
			log.WithFields(log.Fields{
				"name": name,
				"err":  err,
			}).Error("error removing container")
			continue
		}
	}
	return nil
}
Example #9
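// dockerRun boots a container for every db.Container received on the in
// channel.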
func dockerRun(dk docker.Client, in chan interface{}) {
	for i := range in {
		dbc := i.(db.Container)
		log.WithField("container", dbc).Info("Start container")
		_, err := dk.Run(docker.RunOptions{
			Image:       dbc.Image,
			Args:        dbc.Command,
			Env:         dbc.Env,
			Labels:      map[string]string{labelKey: labelValue},
			NetworkMode: plugin.NetworkName,
		})
		if err != nil {
			log.WithFields(log.Fields{
				"container": dbc,
			}).WithError(err).Warning("Failed to run container.")
			continue
		}
	}
}
Example #10
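// runWorker syncs the containers running on this worker with the desired
// state in the database, booting and killing containers as needed.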
func runWorker(conn db.Conn, dk docker.Client, myIP string, subnet net.IPNet) {
	if myIP == "" {
		return
	}

	filter := map[string][]string{"label": {labelPair}}

	var toBoot, toKill []interface{}
	for i := 0; i < 2; i++ {
		dkcs, err := dk.List(filter)
		if err != nil {
			log.WithError(err).Warning("Failed to list docker containers.")
			return
		}

		conn.Transact(func(view db.Database) error {
			_, err := view.MinionSelf()
			if err != nil {
				return nil
			}

			dbcs := view.SelectFromContainer(func(dbc db.Container) bool {
				return dbc.Minion == myIP
			})

			dkcs, badDkcs := filterOnSubnet(subnet, dkcs)

			var changed []db.Container
			changed, toBoot, toKill = syncWorker(dbcs, dkcs, subnet)
			for _, dbc := range changed {
				view.Commit(dbc)
			}

			toKill = append(toKill, badDkcs...)
			return nil
		})

		doContainers(dk, toBoot, dockerRun)
		doContainers(dk, toKill, dockerKill)
	}
}
Example #11
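// updateEtcHosts writes an updated /etc/hosts into each container so that the
// labels it connects to resolve to their IPs.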
func updateEtcHosts(dk docker.Client, containers []db.Container, labels []db.Label,
	connections []db.Connection) {

	/* Map label name to its IP. */
	labelIP := make(map[string]string)

	/* Map label to a list of all labels it connects to. */
	conns := make(map[string][]string)

	for _, l := range labels {
		labelIP[l.Label] = l.IP
	}

	for _, conn := range connections {
		if conn.To == stitch.PublicInternetLabel ||
			conn.From == stitch.PublicInternetLabel {
			continue
		}
		conns[conn.From] = append(conns[conn.From], conn.To)
	}

	for _, dbc := range containers {
		id := dbc.DockerID

		currHosts, err := dk.GetFromContainer(id, "/etc/hosts")
		if err != nil {
			log.WithError(err).Error("Failed to get /etc/hosts")
			return
		}

		newHosts := generateEtcHosts(dbc, labelIP, conns)

		if newHosts != currHosts {
			err = dk.WriteToContainer(id, newHosts, "/etc", "hosts", 0644)
			if err != nil {
				log.WithError(err).Error("Failed to update /etc/hosts")
			}
		}
	}
}