Example #1
0
// runPingForHost pings a single node address and records the result on the
// cluster. It is intended to run as a goroutine; wg is marked done on return.
func (c *Cluster) runPingForHost(addr string, wg *sync.WaitGroup) {
	defer wg.Done()
	node, err := c.getNodeByAddr(addr)
	if err != nil {
		log.Errorf("[active-monitoring]: error creating client: %s", err.Error())
		return
	}
	// Use the dedicated ping client (typically configured with a short timeout).
	node.HTTPClient = c.pingClient
	if pingErr := node.Ping(); pingErr != nil {
		log.Errorf("[active-monitoring]: error in ping: %s", pingErr.Error())
		c.handleNodeError(addr, pingErr, true)
		return
	}
	c.handleNodeSuccess(addr)
}
Example #2
0
// CreateContainerSchedulerOpts is similar to CreateContainer but allows
// arbitrary options to be passed to the scheduler. When nodes is empty the
// cluster scheduler picks a target (retrying up to 5 times on failure);
// otherwise the first address in nodes is used with no cross-node retries.
// It returns the chosen node address, the created container, and any error.
func (c *Cluster) CreateContainerSchedulerOpts(opts docker.CreateContainerOptions, schedulerOpts SchedulerOptions, nodes ...string) (string, *docker.Container, error) {
	var (
		addr      string
		container *docker.Container
		err       error
	)
	useScheduler := len(nodes) == 0
	maxTries := 5
	for ; maxTries > 0; maxTries-- {
		if useScheduler {
			node, scheduleErr := c.scheduler.Schedule(c, opts, schedulerOpts)
			if scheduleErr != nil {
				// Fold the previous creation error (if any) into the message so
				// the caller sees why earlier attempts failed before the
				// scheduler itself gave up.
				if err != nil {
					scheduleErr = fmt.Errorf("Error in scheduler after previous errors (%s) trying to create container: %s", err.Error(), scheduleErr.Error())
				}
				return addr, nil, scheduleErr
			}
			addr = node.Address
		} else {
			addr = nodes[0]
		}
		if addr == "" {
			return addr, nil, errors.New("CreateContainer needs a non empty node addr")
		}
		container, err = c.createContainerInNode(opts, addr)
		if err == nil {
			c.handleNodeSuccess(addr)
			break
		} else {
			log.Errorf("Error trying to create container in node %q: %s. Trying again in another node...", addr, err.Error())
			// Only count this failure against the node when it looks like a
			// node-level problem: a network error, a refused connection, or a
			// failure in the createContainer call itself.
			shouldIncrementFailures := false
			if nodeErr, ok := err.(DockerNodeError); ok {
				baseErr := nodeErr.BaseError()
				_, isNetErr := baseErr.(*net.OpError)
				if isNetErr || baseErr == docker.ErrConnectionRefused || nodeErr.cmd == "createContainer" {
					shouldIncrementFailures = true
				}
			}
			c.handleNodeError(addr, err, shouldIncrementFailures)
			// An explicitly requested node leaves nothing else to try.
			if !useScheduler {
				return addr, nil, err
			}
		}
	}
	if err != nil {
		return addr, nil, fmt.Errorf("CreateContainer: maximum number of tries exceeded, last error: %s", err.Error())
	}
	// Persist the container-to-node mapping so later operations can locate it.
	err = c.storage().StoreContainer(container.ID, addr)
	return addr, container, err
}
Example #3
0
// CreateContainerSchedulerOpts is similar to CreateContainer but allows
// arbitrary options to be passed to the scheduler. When nodes is empty the
// cluster scheduler picks a target (retrying up to 5 times on failure);
// otherwise the first address in nodes is used with no cross-node retries.
// It returns the chosen node address, the created container, and any error.
func (c *Cluster) CreateContainerSchedulerOpts(opts docker.CreateContainerOptions, schedulerOpts SchedulerOptions, nodes ...string) (string, *docker.Container, error) {
	var (
		addr      string
		container *docker.Container
		err       error
	)
	useScheduler := len(nodes) == 0
	maxTries := 5
	for ; maxTries > 0; maxTries-- {
		if useScheduler {
			node, scheduleErr := c.scheduler.Schedule(c, opts, schedulerOpts)
			if scheduleErr != nil {
				// Fold the previous creation error (if any) into the message so
				// the caller sees why earlier attempts failed before the
				// scheduler itself gave up.
				if err != nil {
					scheduleErr = fmt.Errorf("Error in scheduler after previous errors (%s) trying to create container: %s", err.Error(), scheduleErr.Error())
				}
				return addr, nil, scheduleErr
			}
			addr = node.Address
		} else {
			addr = nodes[0]
		}
		if addr == "" {
			return addr, nil, errors.New("CreateContainer needs a non empty node addr")
		}
		container, err = c.createContainerInNode(opts, addr)
		if err == nil {
			c.handleNodeSuccess(addr)
			break
		} else {
			log.Errorf("Error trying to create container in node %q: %s. Trying again in another node...", addr, err.Error())
			// Record the failure against the node before deciding whether to retry.
			c.handleNodeError(addr, err)
			// An explicitly requested node leaves nothing else to try.
			if !useScheduler {
				return addr, nil, err
			}
		}
	}
	if err != nil {
		return addr, nil, fmt.Errorf("CreateContainer: maximum number of tries exceeded, last error: %s", err.Error())
	}
	// Persist the container-to-node mapping so later operations can locate it.
	err = c.storage().StoreContainer(container.ID, addr)
	return addr, container, err
}
Example #4
0
// runActiveMonitoring pings every registered node once per updateInterval
// until a value arrives on c.monitoringDone. Each round pings all nodes
// concurrently and waits for the whole round to finish before sleeping.
func (c *Cluster) runActiveMonitoring(updateInterval time.Duration) {
	log.Debugf("[active-monitoring]: active monitoring enabled, pinging hosts every %d seconds", updateInterval/time.Second)
	for {
		// Idiomatic short declaration instead of separate var statements.
		nodes, err := c.UnfilteredNodes()
		if err != nil {
			// Best-effort: log and keep monitoring; ranging over a nil slice
			// below is simply a no-op for this round.
			log.Errorf("[active-monitoring]: error in UnfilteredNodes: %s", err.Error())
		}
		wg := sync.WaitGroup{}
		for _, node := range nodes {
			wg.Add(1)
			go c.runPingForHost(node.Address, &wg)
		}
		wg.Wait()
		select {
		case <-c.monitoringDone:
			return
		case <-time.After(updateInterval):
		}
	}
}