Example 1
// Delete stopped containers.
//
// We do this because Docker Swarm accounts for stopped containers when
// applying its affinity filter, whereas our semantics don't consider
// stopped containers in scheduling decisions.
func delStopped(dk docker.Client) error {
	containers, err := dk.List(map[string][]string{"status": {"exited"}})
	if err != nil {
		return fmt.Errorf("error listing stopped containers: %s", err)
	}
	for _, dkc := range containers {
	// Stopped containers show up with a "/" in front of the name.
		name := dkc.Name[1:]
		if err := dk.Remove(name); err != nil {
			log.WithFields(log.Fields{
				"name": name,
				"err":  err,
			}).Error("error removing container")
			continue
		}
	}
	return nil
}
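
For context, here is a minimal sketch of the docker.Client surface this example relies on. The method names, signatures, and Container field are assumptions inferred from the call sites, not the package's actual API:

// Hypothetical sketch of the docker.Client subset delStopped uses, inferred
// from its call sites; the real package's definitions may differ.
type Client interface {
	// List returns the containers matching the given filter, e.g.
	// {"status": {"exited"}} for stopped containers.
	List(filters map[string][]string) ([]Container, error)
	// Remove deletes the container with the given name.
	Remove(name string) error
}

// Container carries the one field the example reads.
type Container struct {
	Name string // Docker reports names with a leading "/".
}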
Example 2
func runWorker(conn db.Conn, dk docker.Client, myIP string, subnet net.IPNet) {
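	// Without an IP, containers can't be matched to this minion,
	// so there is nothing to sync.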
	if myIP == "" {
		return
	}

	filter := map[string][]string{"label": {labelPair}}

	var toBoot, toKill []interface{}
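	// Loop twice so a second pass can pick up the effects of the
	// first (e.g. containers booted or killed below).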
	for i := 0; i < 2; i++ {
		dkcs, err := dk.List(filter)
		if err != nil {
			log.WithError(err).Warning("Failed to list docker containers.")
			return
		}

		conn.Transact(func(view db.Database) error {
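			// If this minion isn't in the database yet, skip this
			// sync pass.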
			_, err := view.MinionSelf()
			if err != nil {
				return nil
			}

			dbcs := view.SelectFromContainer(func(dbc db.Container) bool {
				return dbc.Minion == myIP
			})

			dkcs, badDkcs := filterOnSubnet(subnet, dkcs)

			var changed []db.Container
			changed, toBoot, toKill = syncWorker(dbcs, dkcs, subnet)
			for _, dbc := range changed {
				view.Commit(dbc)
			}

			toKill = append(toKill, badDkcs...)
			return nil
		})

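		// Apply the changes outside the transaction so the Docker
		// calls don't hold up the database.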
		doContainers(dk, toBoot, dockerRun)
		doContainers(dk, toKill, dockerKill)
	}
}
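
filterOnSubnet isn't shown above. From its call site it appears to partition the listed containers by whether their IPs fall inside the worker's subnet, returning the outsiders separately so they can be killed. A hypothetical sketch under that assumption (the IP field and the parsing are guesses, not the project's actual code):

// Hypothetical sketch of filterOnSubnet, inferred from its call site: keep
// containers whose IP lies in the subnet, and return the rest as a
// []interface{} so the caller can append them to toKill.
func filterOnSubnet(subnet net.IPNet, dkcs []docker.Container) (
	[]docker.Container, []interface{}) {

	var good []docker.Container
	var bad []interface{}
	for _, dkc := range dkcs {
		// The IP field is an assumption about docker.Container.
		if ip := net.ParseIP(dkc.IP); ip != nil && subnet.Contains(ip) {
			good = append(good, dkc)
		} else {
			bad = append(bad, dkc)
		}
	}
	return good, bad
}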