Example 1
func listServices(ctx context.Context, dockerClient client.APIClient) ([]dockerData, error) {
	serviceList, err := dockerClient.ServiceList(ctx, dockertypes.ServiceListOptions{})
	if err != nil {
		return []dockerData{}, err
	}
	networkListArgs := filters.NewArgs()
	networkListArgs.Add("driver", "overlay")

	networkList, err := dockerClient.NetworkList(ctx, dockertypes.NetworkListOptions{Filters: networkListArgs})

	networkMap := make(map[string]*dockertypes.NetworkResource)
	if err != nil {
		log.Debug("Failed to network inspect on client for docker, error: %s", err)
		return []dockerData{}, err
	}
	for _, network := range networkList {
		networkToAdd := network
		networkMap[network.ID] = &networkToAdd
	}

	var dockerDataList []dockerData

	for _, service := range serviceList {
		dockerData := parseService(service, networkMap)

		dockerDataList = append(dockerDataList, dockerData)
	}
	return dockerDataList, nil
}
Example 2
func getServices(
	ctx context.Context,
	apiclient client.APIClient,
	namespace string,
) ([]swarm.Service, error) {
	return apiclient.ServiceList(
		ctx,
		types.ServiceListOptions{Filter: getStackFilter(namespace)})
}
Example 3
func getNetworks(
	ctx context.Context,
	apiclient client.APIClient,
	namespace string,
) ([]types.NetworkResource, error) {
	return apiclient.NetworkList(
		ctx,
		types.NetworkListOptions{Filters: getStackFilter(namespace)})
}
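
Both examples above call a getStackFilter helper that is not shown. A minimal sketch, assuming it builds a label filter on the com.docker.stack.namespace label that docker stack applies to the resources it creates; the real helper may differ:

// Sketch of getStackFilter (not shown in the source; assumes the standard
// docker stack namespace label). Requires the engine API filters package,
// e.g. "github.com/docker/engine-api/types/filters".
func getStackFilter(namespace string) filters.Args {
	filter := filters.NewArgs()
	filter.Add("label", "com.docker.stack.namespace="+namespace)
	return filter
}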
Example 4
// GetContainer looks up a container on the host with the specified ID
// or name and returns it (nil if no such container exists), or an error.
func GetContainer(ctx context.Context, clientInstance client.APIClient, id string) (*types.ContainerJSON, error) {
	container, err := clientInstance.ContainerInspect(ctx, id)
	if err != nil {
		if client.IsErrContainerNotFound(err) {
			return nil, nil
		}
		return nil, err
	}
	return &container, nil
}
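
A hypothetical call site, to make the convention explicit: a nil container together with a nil error means the container was not found.

// Illustrative only; "cli" and the container name are placeholders.
func printContainerStatus(ctx context.Context, cli client.APIClient) error {
	container, err := GetContainer(ctx, cli, "my-container")
	if err != nil {
		return err
	}
	if container == nil {
		fmt.Println("no such container")
		return nil
	}
	fmt.Println(container.State.Status)
	return nil
}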
Example 5
func waitFor(once *sync.Once, client dockerclient.APIClient, endpoint string) {
	once.Do(func() {
		err := ClientOK(endpoint, func() bool {
			_, err := client.Info(context.Background())
			return err == nil
		})
		if err != nil {
			panic(err.Error())
		}
	})
}
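
ClientOK is not included in the snippet. A plausible sketch, assuming it simply polls the probe function until it succeeds or a deadline expires; the real helper in the source project may back off or log differently:

// Hypothetical sketch of ClientOK; assumes "fmt" and "time".
func ClientOK(endpoint string, check func() bool) error {
	deadline := time.Now().Add(30 * time.Second)
	for time.Now().Before(deadline) {
		if check() {
			return nil
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("docker endpoint %s did not become ready", endpoint)
}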
Example 6
func containerRemove(client client.APIClient, ctx context.Context, id string) {
	var err error
	defer apexctx.GetLogger(ctx).WithField("id", id).Trace("removing").Stop(&err)

	removeOpts := types.ContainerRemoveOptions{}
	err = client.ContainerRemove(ctx, id, removeOpts)
}
Example 7
// GetContainersByFilter looks up the host's containers with the specified filters and
// returns a list of matching containers, or an error.
func GetContainersByFilter(ctx context.Context, clientInstance client.APIClient, containerFilters ...map[string][]string) ([]types.Container, error) {
	filterArgs := filters.NewArgs()

	// FIXME(vdemeester) I don't like 3 for loops >_<
	for _, filter := range containerFilters {
		for key, filterValue := range filter {
			for _, value := range filterValue {
				filterArgs.Add(key, value)
			}
		}
	}

	return clientInstance.ContainerList(ctx, types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
}
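
A hypothetical usage, showing how the filter maps translate into engine filter arguments. The label key is the conventional docker-compose project label; the project name is illustrative:

func listProjectContainers(ctx context.Context, cli client.APIClient) ([]types.Container, error) {
	return GetContainersByFilter(ctx, cli, map[string][]string{
		"label": {"com.docker.compose.project=myproject"},
	})
}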
Example 8
func listContainers(dockerClient client.APIClient) ([]dockertypes.ContainerJSON, error) {
	containerList, err := dockerClient.ContainerList(context.Background(), dockertypes.ContainerListOptions{})
	if err != nil {
		return []dockertypes.ContainerJSON{}, err
	}
	containersInspected := []dockertypes.ContainerJSON{}

	// inspect each container in the list
	for _, container := range containerList {
		containerInspected, err := dockerClient.ContainerInspect(context.Background(), container.ID)
		if err != nil {
			log.Warnf("Failed to inspect container %s, error: %s", container.ID, err)
			continue
		}
		containersInspected = append(containersInspected, containerInspected)
	}
	return containersInspected, nil
}
Example 9
func pullImage(client client.APIClient, service *Service, image string) error {
	distributionRef, err := reference.ParseNamed(image)
	if err != nil {
		return err
	}

	switch distributionRef.(type) {
	case reference.Canonical:
	case reference.NamedTagged:
	default:
		distributionRef, err = reference.WithTag(distributionRef, "latest")
		if err != nil {
			return err
		}
	}

	repoInfo, err := registry.ParseRepositoryInfo(distributionRef)
	if err != nil {
		return err
	}

	authConfig := types.AuthConfig{}
	if service.context.ConfigFile != nil && repoInfo != nil && repoInfo.Index != nil {
		authConfig = registry.ResolveAuthConfig(service.context.ConfigFile.AuthConfigs, repoInfo.Index)
	}

	encodedAuth, err := encodeAuthToBase64(authConfig)
	if err != nil {
		return err
	}

	options := types.ImagePullOptions{
		RegistryAuth: encodedAuth,
	}
	responseBody, err := client.ImagePull(context.Background(), distributionRef.String(), options)
	if err != nil {
		logrus.Errorf("Failed to pull image %s: %v", image, err)
		return err
	}
	defer responseBody.Close()

	var writeBuff io.Writer = os.Stdout

	outFd, isTerminalOut := term.GetFdInfo(os.Stdout)

	err = jsonmessage.DisplayJSONMessagesStream(responseBody, writeBuff, outFd, isTerminalOut, nil)
	if err != nil {
		if jerr, ok := err.(*jsonmessage.JSONError); ok {
			// If no error code is set, default to 1
			if jerr.Code == 0 {
				jerr.Code = 1
			}
			return fmt.Errorf("Status: %s, Code: %d", jerr.Message, jerr.Code)
		}
	}
	return err
}
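
encodeAuthToBase64 is referenced but not shown. In the Docker client code of this era it JSON-encodes the auth configuration and base64url-encodes the result, which is the format the engine expects for registry credentials; a sketch consistent with that:

// Sketch of encodeAuthToBase64; assumes "encoding/base64" and "encoding/json".
func encodeAuthToBase64(authConfig types.AuthConfig) (string, error) {
	buf, err := json.Marshal(authConfig)
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}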
Example 10
// Reference returns the reference of a node. The special value "self" for a node
// reference is mapped to the current node, hence the node ID is retrieved using
// the `/info` endpoint.
func Reference(client apiclient.APIClient, ctx context.Context, ref string) (string, error) {
	if ref == "self" {
		info, err := client.Info(ctx)
		if err != nil {
			return "", err
		}
		return info.Swarm.NodeID, nil
	}
	return ref, nil
}
Example 11
func nodeReference(client apiclient.APIClient, ctx context.Context, ref string) (string, error) {
	// The special value "self" for a node reference is mapped to the current
	// node, hence the node ID is retrieved using the `/info` endpoint.
	if ref == "self" {
		info, err := client.Info(ctx)
		if err != nil {
			return "", err
		}
		return info.Swarm.NodeID, nil
	}
	return ref, nil
}
Example 12
// GetContainerByName looks up the host's container with the specified name and
// returns it (nil if no container matches), or an error.
func GetContainerByName(client client.APIClient, name string) (*types.Container, error) {
	filterArgs := filters.NewArgs()
	filterArgs.Add("label", fmt.Sprintf("%s=%s", NAME, name))

	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
	if err != nil {
		return nil, err
	}

	if len(containers) == 0 {
		return nil, nil
	}

	return &containers[0], nil
}
Example 13
// GetContainerByID looks up the host's container with the specified ID and
// returns it (nil if no container matches), or an error.
func GetContainerByID(client client.APIClient, id string) (*types.Container, error) {
	filterArgs := filters.NewArgs()
	filterArgs.Add("id", id)

	containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{
		All:    true,
		Filter: filterArgs,
	})
	if err != nil {
		return nil, err
	}

	if len(containers) == 0 {
		return nil, nil
	}

	return &containers[0], nil
}
Example 14
File: name.go Project: haj/kompose
// NewNamer returns a namer that generates names from the specified project and
// service name plus an incrementing counter, e.g. project_service_1, project_service_2…
func NewNamer(ctx context.Context, client client.APIClient, project, service string, oneOff bool) (Namer, error) {
	namer := &defaultNamer{
		project: project,
		service: service,
		oneOff:  oneOff,
	}

	filter := filters.NewArgs()
	filter.Add("label", fmt.Sprintf("%s=%s", labels.PROJECT.Str(), project))
	filter.Add("label", fmt.Sprintf("%s=%s", labels.SERVICE.Str(), service))
	if oneOff {
		filter.Add("label", fmt.Sprintf("%s=%s", labels.ONEOFF.Str(), "True"))
	} else {
		filter.Add("label", fmt.Sprintf("%s=%s", labels.ONEOFF.Str(), "False"))
	}

	containers, err := client.ContainerList(ctx, types.ContainerListOptions{
		All:    true,
		Filter: filter,
	})
	if err != nil {
		return nil, err
	}

	maxNumber := 0
	for _, container := range containers {
		number, err := strconv.Atoi(container.Labels[labels.NUMBER.Str()])
		if err != nil {
			return nil, err
		}
		if number > maxNumber {
			maxNumber = number
		}
	}
	namer.currentNumber = maxNumber + 1

	return namer, nil
}
Example 15
func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) {
	logrus.Debugf("collecting stats for %s", s.Name)
	var (
		getFirst       bool
		previousCPU    uint64
		previousSystem uint64
		u              = make(chan error, 1)
	)

	defer func() {
		// if an error occurs and we never receive any stats, release the wait group anyway
		if !getFirst {
			getFirst = true
			waitFirst.Done()
		}
	}()

	responseBody, err := cli.ContainerStats(ctx, s.Name, streamStats)
	if err != nil {
		s.mu.Lock()
		s.err = err
		s.mu.Unlock()
		return
	}
	defer responseBody.Close()

	dec := json.NewDecoder(responseBody)
	go func() {
		for {
			var v *types.StatsJSON

			if err := dec.Decode(&v); err != nil {
				dec = json.NewDecoder(io.MultiReader(dec.Buffered(), responseBody))
				u <- err
				continue
			}

			var memPercent = 0.0
			var cpuPercent = 0.0

			// MemoryStats.Limit will never be 0 unless the container is not running
			// and we haven't received any data from the cgroup yet
			if v.MemoryStats.Limit != 0 {
				memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
			}

			previousCPU = v.PreCPUStats.CPUUsage.TotalUsage
			previousSystem = v.PreCPUStats.SystemUsage
			cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v)
			blkRead, blkWrite := calculateBlockIO(v.BlkioStats)
			s.mu.Lock()
			s.CPUPercentage = cpuPercent
			s.Memory = float64(v.MemoryStats.Usage)
			s.MemoryLimit = float64(v.MemoryStats.Limit)
			s.MemoryPercentage = memPercent
			s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks)
			s.BlockRead = float64(blkRead)
			s.BlockWrite = float64(blkWrite)
			s.PidsCurrent = v.PidsStats.Current
			s.mu.Unlock()
			u <- nil
			if !streamStats {
				return
			}
		}
	}()
	for {
		select {
		case <-time.After(2 * time.Second):
			// zero out the values if we have not received an update within
			// the specified duration.
			s.mu.Lock()
			s.CPUPercentage = 0
			s.Memory = 0
			s.MemoryPercentage = 0
			s.MemoryLimit = 0
			s.NetworkRx = 0
			s.NetworkTx = 0
			s.BlockRead = 0
			s.BlockWrite = 0
			s.PidsCurrent = 0
			s.err = errors.New("timeout waiting for stats")
			s.mu.Unlock()
			// release the WaitGroup once the first stat has been received
			if !getFirst {
				getFirst = true
				waitFirst.Done()
			}
		case err := <-u:
			if err != nil {
				s.mu.Lock()
				s.err = err
				s.mu.Unlock()
				continue
			}
			s.mu.Lock()
			s.err = nil
			s.mu.Unlock()
			// release the WaitGroup once the first stat has been received
			if !getFirst {
				getFirst = true
				waitFirst.Done()
			}
		}
		if !streamStats {
			return
		}
	}
}
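
calculateCPUPercent is not shown. The classic docker stats formula compares the container's CPU-time delta against the host's over the same interval, scaled by the number of CPUs; a sketch along those lines:

// Sketch of calculateCPUPercent, following the historical docker CLI formula.
func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 {
	cpuPercent := 0.0
	// change in the container's total CPU usage between two samples
	cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU)
	// change in the whole system's CPU usage over the same interval
	systemDelta := float64(v.CPUStats.SystemUsage) - float64(previousSystem)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return cpuPercent
}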
Example 16
func removeImage(ctx context.Context, client client.APIClient, image string) error {
	_, err := client.ImageRemove(ctx, types.ImageRemoveOptions{
		ImageID: image,
	})
	return err
}
Example 17
func pullImage(ctx context.Context, client client.APIClient, service *Service, image string) error {
	fmt.Fprintf(os.Stderr, "Pulling %s (%s)...\n", service.name, image)
	distributionRef, err := reference.ParseNamed(image)
	if err != nil {
		return err
	}

	repoInfo, err := registry.ParseRepositoryInfo(distributionRef)
	if err != nil {
		return err
	}

	authConfig := service.context.AuthLookup.Lookup(repoInfo)

	encodedAuth, err := encodeAuthToBase64(authConfig)
	if err != nil {
		return err
	}

	options := types.ImagePullOptions{
		ImageID:      distributionRef.String(),
		Tag:          "latest",
		RegistryAuth: encodedAuth,
	}
	if named, ok := distributionRef.(reference.Named); ok {
		options.ImageID = named.FullName()
	}
	if tagged, ok := distributionRef.(reference.NamedTagged); ok {
		options.Tag = tagged.Tag()
	}

	timeoutsRemaining := 3
	for i := 0; i < 100; i++ {
		responseBody, err := client.ImagePull(ctx, options, nil)
		if err != nil {
			logrus.Errorf("Failed to pull image %s: %v", image, err)
			return err
		}

		var writeBuff io.Writer = os.Stderr

		outFd, isTerminalOut := term.GetFdInfo(os.Stderr)

		err = jsonmessage.DisplayJSONMessagesStream(responseBody, writeBuff, outFd, isTerminalOut, nil)
		responseBody.Close()
		if err == nil {
			return nil
		} else if strings.Contains(err.Error(), "timed out") {
			timeoutsRemaining--
			if timeoutsRemaining == 0 {
				return err
			}
			continue
		} else if strings.Contains(err.Error(), "connection") || strings.Contains(err.Error(), "unreachable") {
			time.Sleep(300 * time.Millisecond)
			continue
		} else {
			if jerr, ok := err.(*jsonmessage.JSONError); ok {
				// If no error code is set, default to 1
				if jerr.Code == 0 {
					jerr.Code = 1
				}
				return fmt.Errorf("Status: %s, Code: %d", jerr.Message, jerr.Code)
			}
		}
	}

	// the retry loop above can only exit by exhausting its attempts
	return fmt.Errorf("failed to pull image %s: retries exhausted", image)
}
Example 18
func (s *containerStats) Collect(cli client.APIClient, streamStats bool) {
	responseBody, err := cli.ContainerStats(context.Background(), s.Name, streamStats)
	if err != nil {
		s.mu.Lock()
		s.err = err
		s.mu.Unlock()
		return
	}
	defer responseBody.Close()

	var (
		previousCPU    uint64
		previousSystem uint64
		dec            = json.NewDecoder(responseBody)
		u              = make(chan error, 1)
	)
	go func() {
		for {
			var v *types.StatsJSON
			if err := dec.Decode(&v); err != nil {
				u <- err
				return
			}

			var memPercent = 0.0
			var cpuPercent = 0.0

			// MemoryStats.Limit will never be 0 unless the container is not running
			// and we haven't received any data from the cgroup yet
			if v.MemoryStats.Limit != 0 {
				memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0
			}

			previousCPU = v.PreCPUStats.CPUUsage.TotalUsage
			previousSystem = v.PreCPUStats.SystemUsage
			cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v)
			blkRead, blkWrite := calculateBlockIO(v.BlkioStats)
			s.mu.Lock()
			s.CPUPercentage = cpuPercent
			s.Memory = float64(v.MemoryStats.Usage)
			s.MemoryLimit = float64(v.MemoryStats.Limit)
			s.MemoryPercentage = memPercent
			s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks)
			s.BlockRead = float64(blkRead)
			s.BlockWrite = float64(blkWrite)
			s.mu.Unlock()
			u <- nil
			if !streamStats {
				return
			}
		}
	}()
	for {
		select {
		case <-time.After(2 * time.Second):
			// zero out the values if we have not received an update within
			// the specified duration.
			s.mu.Lock()
			s.CPUPercentage = 0
			s.Memory = 0
			s.MemoryPercentage = 0
			s.MemoryLimit = 0
			s.NetworkRx = 0
			s.NetworkTx = 0
			s.BlockRead = 0
			s.BlockWrite = 0
			s.mu.Unlock()
		case err := <-u:
			if err != nil {
				s.mu.Lock()
				s.err = err
				s.mu.Unlock()
				return
			}
		}
		if !streamStats {
			return
		}
	}
}
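
The calculateBlockIO and calculateNetwork helpers used by both collectors are likewise not shown; sketches matching the historical docker CLI behavior (sum block I/O bytes per operation, and sum Rx/Tx bytes across interfaces):

// Sketch of calculateBlockIO; assumes "strings".
func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) {
	for _, bioEntry := range blkio.IoServiceBytesRecursive {
		switch strings.ToLower(bioEntry.Op) {
		case "read":
			blkRead += bioEntry.Value
		case "write":
			blkWrite += bioEntry.Value
		}
	}
	return blkRead, blkWrite
}

// Sketch of calculateNetwork: total bytes received/transmitted over all interfaces.
func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) {
	var rx, tx float64
	for _, v := range network {
		rx += float64(v.RxBytes)
		tx += float64(v.TxBytes)
	}
	return rx, tx
}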