Code example #1
// Action makes a registry (Consul) call based on the docker event
func Action(logger *log.Logger, action string, containerId string, docker *dockerapi.Client, TTL uint64, CONSUL string, DOMAIN string) {

	//if we fail on inspection, that is ok because we might
	//be checking for a crufty container that no longer exists
	//due to docker being shut down uncleanly

	container, dockerErr := docker.InspectContainer(containerId)
	if dockerErr != nil {
		logger.Printf("unable to inspect container:%s %s", containerId, dockerErr)
		return
	}
	var hostname = container.Name[1:]
	var ipaddress = container.NetworkSettings.IPAddress

	if ipaddress == "" {
		logger.Println("no ipaddress returned for container: " + hostname)
		return
	}

	switch action {
	case "start":
		logger.Println("new container name=" + hostname + " ip:" + ipaddress)
		Deregister(CONSUL, logger, hostname)
		service := Service{Name: hostname, Address: ipaddress}
		Register(CONSUL, logger, &service)
	case "stop", "destroy":
		logger.Println("removing container name=" + hostname + " ip:" + ipaddress)
		Deregister(CONSUL, logger, hostname)
	}

}
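Register and Deregister are package helpers not shown here. As a minimal wiring sketch (not from the original project), Action could be driven from the daemon's event stream via fsouza/go-dockerclient; the socket endpoint, TTL, CONSUL, and DOMAIN values below are placeholder assumptions:

func main() {
	logger := log.New(os.Stdout, "registrator ", log.LstdFlags)
	docker, err := dockerapi.NewClient("unix:///var/run/docker.sock") // assumed local socket
	if err != nil {
		logger.Fatal(err)
	}
	events := make(chan *dockerapi.APIEvents)
	if err := docker.AddEventListener(events); err != nil {
		logger.Fatal(err)
	}
	for event := range events {
		// Action re-inspects the container itself, so only the action
		// name and the container ID need to be forwarded.
		Action(logger, event.Status, event.ID, docker, 30, "http://127.0.0.1:8500", "docker.local")
	}
}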
Code example #2
File: dockerfile.go Project: adamveld12/goku
func launchContainer(client *docker.Client, containerImageName, name string) (*docker.Container, error) {

	images, err := client.ListImages(docker.ListImagesOptions{Filter: containerImageName})

	if err != nil {
		return nil, err
	}

	if len(images) == 0 {
		// guard against an empty result before indexing
		return nil, fmt.Errorf("no image found matching %q", containerImageName)
	}
	targetImageId := images[0].ID
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name: name,
		Config: &docker.Config{
			Image: targetImageId,
		},
	})

	if err != nil {
		return nil, err
	}

	if err := client.StartContainer(container.ID, &docker.HostConfig{PublishAllPorts: true}); err != nil {
		return nil, err
	}

	return client.InspectContainer(container.ID)
}
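A hedged usage sketch (the image and container names are illustrative): since launchContainer returns the inspected container, the randomly published host ports are immediately available:

	client, err := docker.NewClient("unix:///var/run/docker.sock")
	if err != nil {
		log.Fatal(err)
	}
	container, err := launchContainer(client, "redis", "goku-redis")
	if err != nil {
		log.Fatal(err)
	}
	// PublishAllPorts was set, so each exposed port got a random host binding.
	for port, bindings := range container.NetworkSettings.Ports {
		for _, b := range bindings {
			log.Printf("%s -> %s:%s", port, b.HostIP, b.HostPort)
		}
	}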
Code example #3
File: docker_test.go Project: raceli/resolvable
func containerAddress(client *dockerapi.Client, containerId string) (string, error) {
	container, err := client.InspectContainer(containerId)
	if err != nil {
		return "", err
	}
	return container.NetworkSettings.IPAddress, nil
}
Code example #4
File: boot.go Project: terminiter/earthquake
func Boot(client *docker.Client, opt *docker.CreateContainerOptions,
	exitCh chan error) (*docker.Container, error) {
	log.Debugf("Creating container for image %s", opt.Config.Image)
	container, err := client.CreateContainer(*opt)
	if err != nil {
		return container, err
	}

	log.Debugf("Starting container %s", container.ID)
	go func() {
		exitCh <- dockerpty.Start(client, container, opt.HostConfig)
	}()

	trial := 0
	for {
		container, err = client.InspectContainer(container.ID)
		if err != nil {
			return container, err
		}
		if container.State.StartedAt.Unix() > 0 {
			break
		}
		if trial > 30 {
			return container, fmt.Errorf("container %s seems not started. state=%#v", container.ID, container.State)
		}
		trial += 1
		time.Sleep(time.Duration(trial*100) * time.Millisecond)
	}
	log.Debugf("container state=%#v", container.State)
	return container, nil
}
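Boot blocks until the daemon reports a start timestamp while dockerpty drives the container's TTY in the background. A plausible call site, sketched with assumed image and command values (client is an initialized *docker.Client):

	opt := docker.CreateContainerOptions{
		Config: &docker.Config{
			Image:     "ubuntu:14.04",
			Cmd:       []string{"/bin/bash"},
			Tty:       true,
			OpenStdin: true,
		},
		HostConfig: &docker.HostConfig{},
	}
	exitCh := make(chan error)
	container, err := Boot(client, &opt, exitCh)
	if err != nil {
		log.Fatal(err)
	}
	// Block until the interactive session ends.
	if err := <-exitCh; err != nil {
		log.Printf("session ended with error: %v", err)
	}
	log.Printf("container %s exited", container.ID)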
Code example #5
File: services.go Project: durl/registrator
func getContainers(client *docker.Client) []*docker.Container {
	allContainers, err := client.ListContainers(docker.ListContainersOptions{All: false})
	if err != nil {
		return nil
	}
	var containers []*docker.Container
	for _, c := range allContainers {
		container, err := client.InspectContainer(c.ID)
		if err != nil {
			// the container may have exited between the list and inspect calls
			continue
		}
		containers = append(containers, container)
	}
	return containers
}
Code example #6
File: docker_client.go Project: cretzel/docker-gen
func getContainers(client *docker.Client) ([]*RuntimeContainer, error) {

	apiContainers, err := client.ListContainers(docker.ListContainersOptions{
		All:  false,
		Size: false,
	})
	if err != nil {
		return nil, err
	}

	containers := []*RuntimeContainer{}
	for _, apiContainer := range apiContainers {
		container, err := client.InspectContainer(apiContainer.ID)
		if err != nil {
			log.Printf("error inspecting container: %s: %s\n", apiContainer.ID, err)
			continue
		}

		registry, repository, tag := splitDockerImage(container.Config.Image)
		runtimeContainer := &RuntimeContainer{
			ID: container.ID,
			Image: DockerImage{
				Registry:   registry,
				Repository: repository,
				Tag:        tag,
			},
			Name:      strings.TrimLeft(container.Name, "/"),
			Gateway:   container.NetworkSettings.Gateway,
			Addresses: []Address{},
			Env:       make(map[string]string),
		}
		for k, v := range container.NetworkSettings.Ports {
			address := Address{
				IP:   container.NetworkSettings.IPAddress,
				Port: k.Port(),
			}
			if len(v) > 0 {
				address.HostPort = v[0].HostPort
			}
			runtimeContainer.Addresses = append(runtimeContainer.Addresses,
				address)

		}

		for _, entry := range container.Config.Env {
			// SplitN tolerates '=' characters inside the value
			parts := strings.SplitN(entry, "=", 2)
			if len(parts) == 2 {
				runtimeContainer.Env[parts[0]] = parts[1]
			}
		}

		containers = append(containers, runtimeContainer)
	}
	return containers, nil

}
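splitDockerImage is referenced above but not part of this excerpt. A rough sketch of the kind of parsing it performs (the heuristics are illustrative, not docker-gen's exact code): a first path segment containing '.' or ':' is taken as the registry, and a trailing ':tag' with no '/' after it is the tag:

func splitDockerImage(img string) (registry string, repository string, tag string) {
	repository = img
	// Split off the tag: the last ':' only counts if no '/' follows it.
	if i := strings.LastIndex(img, ":"); i >= 0 && !strings.Contains(img[i+1:], "/") {
		repository, tag = img[:i], img[i+1:]
	}
	// A leading segment that looks like a host is treated as a registry.
	if parts := strings.SplitN(repository, "/", 2); len(parts) == 2 &&
		(strings.Contains(parts[0], ".") || strings.Contains(parts[0], ":")) {
		registry, repository = parts[0], parts[1]
	}
	return registry, repository, tag
}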
Code example #7
File: event.go Project: show168/dns-docker
// Handles a container start event.
func Start(client *docker.Client, event *docker.APIEvents) {
	fmt.Println("Received start event %s for container %s", event.Status, event.ID[:12])
	container, err := client.InspectContainer(event.ID[:12])
	common.ErrorHandle(err)
	ipAddress := container.NetworkSettings.IPAddress
	include := common.GetConfig("Section", "include")
	if include == "*" || strings.Contains(","+include+",", ","+common.SubstrAfter(container.Name, 0)+",") {
		common.AppendFile(common.GetConfig("Section", "hostFile"), ipAddress+"  "+getDomainName(container.Name))
		restartDns()
	}
}
Code example #8
File: container.go Project: Hellslicer/os
func inspect(client *dockerClient.Client, id string) (*dockerClient.Container, error) {
	c, err := client.InspectContainer(id)
	if err != nil {
		return nil, err
	}

	if strings.HasPrefix(c.Name, "/") {
		c.Name = c.Name[1:]
	}

	return c, err
}
Code example #9
File: docker_test.go Project: nak3/nomad
func waitForExist(t *testing.T, client *docker.Client, handle *DockerHandle) {
	tu.WaitForResult(func() (bool, error) {
		container, err := client.InspectContainer(handle.ContainerID())
		if err != nil {
			if _, ok := err.(*docker.NoSuchContainer); !ok {
				return false, err
			}
		}

		return container != nil, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
Code example #10
File: docker_helpers.go Project: jojimt/contrib
// DoInspectContainerBenchmark does periodically InspectContainer with specific interval, returns latencies
// of all the calls in nanoseconds
func DoInspectContainerBenchmark(client *docker.Client, interval, testPeriod time.Duration, containerIDs []string) []int {
	startTime := time.Now()
	latencies := []int{}
	rand.Seed(time.Now().Unix())
	for {
		containerID := containerIDs[rand.Int()%len(containerIDs)]
		start := time.Now()
		client.InspectContainer(containerID)
		latencies = append(latencies, int(time.Since(start).Nanoseconds()))
		if time.Since(startTime) >= testPeriod {
			break
		}
		if interval != 0 {
			time.Sleep(interval)
		}
	}
	return latencies
}
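A short follow-up sketch (not part of the original helper) showing how the returned nanosecond latencies might be summarized:

	ids := []string{"c1", "c2"} // hypothetical container IDs
	latencies := DoInspectContainerBenchmark(client, 10*time.Millisecond, 5*time.Second, ids)
	var total int
	for _, ns := range latencies {
		total += ns
	}
	if len(latencies) > 0 {
		mean := time.Duration(total / len(latencies))
		fmt.Printf("%d calls, mean InspectContainer latency %v\n", len(latencies), mean)
	}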
Code example #11
// dockerEventListener reads docker events from the channel and reacts to them
func dockerEventListener(server *SSHServer, client *docker.Client, events chan *docker.APIEvents) {
	for {
		select {
		case event, ok := <-events:
			if !ok {
				log.Printf("Docker daemon connection interrupted")
				return // break here would only exit the select, not the loop
			}

			// If a container was started, check that it exposes port 22 and add it
			if event.Status == "start" {
				log.Printf("Container %s was started", event.ID[:12])

				container, err := client.InspectContainer(event.ID)
				if err != nil {
					log.Printf("Unable to inspect container %s: %s", event.ID[:12], err)
					continue
				}
				if len(container.NetworkSettings.Ports) == 0 {
					continue
				}
				for port, mapping := range container.NetworkSettings.Ports {
					if port == "22/tcp" {
						log.Printf("Added %v: %s:%v\n", container.ID[:12], mapping[0].HostIP, mapping[0].HostPort)
						server.AddHost(container.ID, fmt.Sprintf("%s:%v", mapping[0].HostIP, mapping[0].HostPort))
					}
				}
			}

			// If the container was stopped or killed, remove it
			if event.Status == "stop" || event.Status == "die" {
				log.Printf("Container %s was removed", event.ID[:12])

				server.RemoveContainer(event.ID)
			}

		case <-time.After(10 * time.Second):
			if err := client.Ping(); err != nil {
				log.Printf("Unable to ping docker daemon: %s", err)
			}
		}
	}
}
Code example #12
File: plugin.go Project: yacloud-io/shield
func listContainers(client *docker.Client, all bool) (map[string]*docker.Container, error) {
	var opts docker.ListContainersOptions
	if all {
		opts.All = true
	} else {
		opts.Filters = map[string][]string{"status": []string{"running"}}
	}
	l, err := client.ListContainers(opts)
	if err != nil {
		return nil, err
	}

	m := make(map[string]*docker.Container, 0)
	for _, c := range l {
		info, err := client.InspectContainer(c.ID)
		if err != nil {
			DEBUG("failed to inspect container: %s", err)
			continue // info is nil on error; skip it
		}
		m[info.Name] = info
	}

	return m, nil
}
Code example #13
File: event.go Project: show168/dns-docker
// Handles a container die event.
func Die(client *docker.Client, event *docker.APIEvents) {
	fmt.Println("Received die event %s for container %s", event.Status, event.ID[:12])
	container, err := client.InspectContainer(event.ID[:12])
	common.ErrorHandle(err)
	include := common.GetConfig("Section", "include")
	if include == "*" || strings.Contains(","+include+",", ","+common.SubstrAfter(container.Name, 0)+",") {
		strData := common.ReadFile(common.GetConfig("Section", "hostFile"))
		arrData := strings.Split(strData, "\n")
		strData = ""
		for i := 0; i < len(arrData); i++ {
			if strings.Contains(arrData[i], getDomainName(container.Name)) {
				continue
			}
			if strData == "" {
				strData = arrData[i]
			} else {
				strData += "\n" + arrData[i]
			}
		}
		common.SaveFile(common.GetConfig("Section", "hostFile"), strData)
		restartDns()
	}
}
Code example #14
File: router_test.go Project: jhadvig/origin
// createAndStartRouterContainer is responsible for deploying the router image in docker. It assumes that all router
// images accept a --master command-line flag pointing at the master URL.
func createAndStartRouterContainer(dockerCli *dockerClient.Client, masterIp string) (containerId string, err error) {
	ports := []string{"80", "443"}
	portBindings := make(map[dockerClient.Port][]dockerClient.PortBinding)
	exposedPorts := map[dockerClient.Port]struct{}{}

	for _, p := range ports {
		dockerPort := dockerClient.Port(p + "/tcp")

		portBindings[dockerPort] = []dockerClient.PortBinding{
			{
				HostPort: p,
			},
		}

		exposedPorts[dockerPort] = struct{}{}
	}

	containerOpts := dockerClient.CreateContainerOptions{
		Config: &dockerClient.Config{
			Image:        getRouterImage(),
			Cmd:          []string{"--master=" + masterIp, "--loglevel=4"},
			ExposedPorts: exposedPorts,
		},
	}

	container, err := dockerCli.CreateContainer(containerOpts)

	if err != nil {
		return "", err
	}

	dockerHostCfg := &dockerClient.HostConfig{NetworkMode: "host", PortBindings: portBindings}
	err = dockerCli.StartContainer(container.ID, dockerHostCfg)

	if err != nil {
		return "", err
	}

	running := false

	//wait for it to start
	for i := 0; i < dockerRetries; i++ {
		c, err := dockerCli.InspectContainer(container.ID)

		if err != nil {
			return "", err
		}

		if c.State.Running {
			running = true
			break
		}
		time.Sleep(time.Second * dockerWaitSeconds)
	}

	if !running {
		return "", errors.New("Container did not start after 3 tries!")
	}

	return container.ID, nil
}
Code example #15
File: main.go Project: raceli/resolvable
func registerContainers(docker *dockerapi.Client, events chan *dockerapi.APIEvents, dns resolver.Resolver, containerDomain string, hostIP net.IP) error {
	// TODO add an options struct instead of passing all as parameters
	// though passing the events channel from an options struct was triggering
	// data race warnings within AddEventListener, so needs more investigation

	if events == nil {
		events = make(chan *dockerapi.APIEvents)
	}
	if err := docker.AddEventListener(events); err != nil {
		return err
	}

	if !strings.HasPrefix(containerDomain, ".") {
		containerDomain = "." + containerDomain
	}

	getAddress := func(container *dockerapi.Container) (net.IP, error) {
		for {
			if container.NetworkSettings.IPAddress != "" {
				return net.ParseIP(container.NetworkSettings.IPAddress), nil
			}

			if container.HostConfig.NetworkMode == "host" {
				if hostIP == nil {
					return nil, errors.New("IP not available with network mode \"host\"")
				} else {
					return hostIP, nil
				}
			}

			if strings.HasPrefix(container.HostConfig.NetworkMode, "container:") {
				otherId := container.HostConfig.NetworkMode[len("container:"):]
				var err error
				container, err = docker.InspectContainer(otherId)
				if err != nil {
					return nil, err
				}
				continue
			}

			return nil, fmt.Errorf("unknown network mode", container.HostConfig.NetworkMode)
		}
	}

	addContainer := func(containerId string) error {
		container, err := docker.InspectContainer(containerId)
		if err != nil {
			return err
		}
		addr, err := getAddress(container)
		if err != nil {
			return err
		}

		err = dns.AddHost(containerId, addr, container.Config.Hostname, container.Name[1:]+containerDomain)
		if err != nil {
			return err
		}

		env := parseContainerEnv(container.Config.Env, "DNS_")
		if dnsDomains, ok := env["DNS_RESOLVES"]; ok {
			if dnsDomains == "" {
				return errors.New("empty DNS_RESOLVES, should contain a comma-separated list with at least one domain")
			}

			port := 53
			if portString := env["DNS_PORT"]; portString != "" {
				port, err = strconv.Atoi(portString)
				if err != nil {
					return errors.New("invalid DNS_PORT \"" + portString + "\", should contain a number")
				}
			}

			domains := strings.Split(dnsDomains, ",")
			err = dns.AddUpstream(containerId, addr, port, domains...)
			if err != nil {
				return err
			}
		}

		if bridge := container.NetworkSettings.Bridge; bridge != "" {
			bridgeAddr := net.ParseIP(container.NetworkSettings.Gateway)
			err = dns.AddHost("bridge:"+bridge, bridgeAddr, bridge)
			if err != nil {
				return err
			}
		}

		return nil
	}

	containers, err := docker.ListContainers(dockerapi.ListContainersOptions{})
	if err != nil {
		return err
	}

	for _, listing := range containers {
		if err := addContainer(listing.ID); err != nil {
			log.Printf("error adding container %s: %s\n", listing.ID[:12], err)
		}
	}

	if err = dns.Listen(); err != nil {
		return err
	}
	defer dns.Close()

	for msg := range events {
		go func(msg *dockerapi.APIEvents) {
			switch msg.Status {
			case "start":
				if err := addContainer(msg.ID); err != nil {
					log.Printf("error adding container %s: %s\n", msg.ID[:12], err)
				}
			case "die":
				dns.RemoveHost(msg.ID)
				dns.RemoveUpstream(msg.ID)
			}
		}(msg)
	}

	return errors.New("docker event loop closed")
}
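parseContainerEnv is used above but not included in the excerpt. A plausible implementation, assuming it merely filters the container's environment down to entries whose keys carry the given prefix:

func parseContainerEnv(env []string, prefix string) map[string]string {
	parsed := make(map[string]string)
	for _, entry := range env {
		if !strings.HasPrefix(entry, prefix) {
			continue
		}
		if kv := strings.SplitN(entry, "=", 2); len(kv) == 2 {
			parsed[kv[0]] = kv[1]
		}
	}
	return parsed
}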
Code example #16
File: handler.go Project: johnmccawley/origin
func newDockerContainerHandler(
	client *docker.Client,
	name string,
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	storageDriver storageDriver,
	cgroupSubsystems *containerLibcontainer.CgroupSubsystems,
	inHostNamespace bool,
) (container.ContainerHandler, error) {
	// Create the cgroup paths.
	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
	for key, val := range cgroupSubsystems.MountPoints {
		cgroupPaths[key] = path.Join(val, name)
	}

	// Generate the equivalent cgroup manager for this container.
	cgroupManager := &cgroup_fs.Manager{
		Cgroups: &libcontainerConfigs.Cgroup{
			Name: name,
		},
		Paths: cgroupPaths,
	}

	rootFs := "/"
	if !inHostNamespace {
		rootFs = "/rootfs"
	}

	id := ContainerNameToDockerId(name)

	storageDirs := []string{path.Join(*dockerRootDir, pathToAufsDir, id)}

	handler := &dockerContainerHandler{
		id:                 id,
		client:             client,
		name:               name,
		machineInfoFactory: machineInfoFactory,
		cgroupPaths:        cgroupPaths,
		cgroupManager:      cgroupManager,
		storageDriver:      storageDriver,
		fsInfo:             fsInfo,
		rootFs:             rootFs,
		storageDirs:        storageDirs,
		fsHandler:          newFsHandler(time.Minute, storageDirs, fsInfo),
	}

	switch storageDriver {
	case aufsStorageDriver:
		handler.fsHandler.start()
	}

	// We assume that if Inspect fails then the container is not known to docker.
	ctnr, err := client.InspectContainer(id)
	if err != nil {
		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
	}
	handler.creationTime = ctnr.Created
	handler.pid = ctnr.State.Pid

	// Add the name and bare ID as aliases of the container.
	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"), id)
	handler.labels = ctnr.Config.Labels
	handler.image = ctnr.Config.Image
	handler.networkMode = ctnr.HostConfig.NetworkMode

	return handler, nil
}
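ContainerNameToDockerId is not shown in this excerpt. In cAdvisor the docker cgroup name ends with the container ID, so a minimal sketch could take the base path segment; the systemd-style "docker-<id>.scope" handling below is an approximation, not the project's exact code:

func ContainerNameToDockerId(name string) string {
	// e.g. "/docker/4a2c..." -> "4a2c..."
	id := path.Base(name)
	// The systemd cgroup driver produces names like "docker-<id>.scope".
	id = strings.TrimSuffix(strings.TrimPrefix(id, "docker-"), ".scope")
	return id
}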
Code example #17
// createAndExtractImage creates a docker container based on the option's image with containerName.
// It will then inspect the container and image and then attempt to extract the image to
// option's destination path.  If the destination path is empty it will write to a temp directory
// and update the option's destination path with a /var/tmp directory.  /var/tmp is used to
// try and ensure it is a non-in-memory tmpfs.
func (i *defaultImageInspector) createAndExtractImage(client *docker.Client, containerName string) (*docker.Image, error) {
	container, err := client.CreateContainer(docker.CreateContainerOptions{
		Name: containerName,
		Config: &docker.Config{
			Image: i.opts.Image,
			// For security purpose we don't define any entrypoint and command
			Entrypoint: []string{""},
			Cmd:        []string{""},
		},
	})
	if err != nil {
		return nil, fmt.Errorf("Unable to create docker container: %v\n", err)
	}

	// delete the container when we are done extracting it
	defer func() {
		client.RemoveContainer(docker.RemoveContainerOptions{
			ID: container.ID,
		})
	}()

	containerMetadata, err := client.InspectContainer(container.ID)
	if err != nil {
		return nil, fmt.Errorf("Unable to get docker container information: %v\n", err)
	}

	imageMetadata, err := client.InspectImage(containerMetadata.Image)
	if err != nil {
		return imageMetadata, fmt.Errorf("Unable to get docker image information: %v\n", err)
	}

	if i.opts.DstPath, err = createOutputDir(i.opts.DstPath, "image-inspector-"); err != nil {
		return imageMetadata, err
	}

	reader, writer := io.Pipe()
	// handle closing the reader/writer in the method that creates them
	defer writer.Close()
	defer reader.Close()

	log.Printf("Extracting image %s to %s", i.opts.Image, i.opts.DstPath)

	// start the copy function first which will block after the first write while waiting for
	// the reader to read.
	errorChannel := make(chan error)
	go func() {
		errorChannel <- client.CopyFromContainer(docker.CopyFromContainerOptions{
			Container:    container.ID,
			OutputStream: writer,
			Resource:     "/",
		})
	}()

	// block on handling the reads here so we ensure both the write and the reader are finished
	// (read waits until an EOF or error occurs).
	handleTarStream(reader, i.opts.DstPath)

	// capture any error from the copy, ensures both the handleTarStream and CopyFromContainer
	// are done.
	err = <-errorChannel
	if err != nil {
		return imageMetadata, fmt.Errorf("Unable to extract container: %v\n", err)
	}

	return imageMetadata, nil
}
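handleTarStream is likewise outside this excerpt. A self-contained sketch of what such a function might do with the stream produced by CopyFromContainer, using only the standard library (permissions and error handling are simplified):

func handleTarStream(reader io.Reader, dst string) {
	tr := tar.NewReader(reader)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return // end of archive
		}
		if err != nil {
			log.Printf("tar read error: %v", err)
			return
		}
		target := filepath.Join(dst, hdr.Name)
		switch hdr.Typeflag {
		case tar.TypeDir:
			os.MkdirAll(target, os.FileMode(hdr.Mode))
		case tar.TypeReg:
			f, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY, os.FileMode(hdr.Mode))
			if err != nil {
				continue
			}
			io.Copy(f, tr)
			f.Close()
		}
	}
}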
Code example #18
File: cleanup.go Project: gloppenhosting/cleanup
func cleanVolumesDocker119(client *docker.Client, apiVersion string) {
	defer wg.Done()

	log.Printf("Vol Cleanup: starting volume cleanup(ver %s) ...", apiVersion)
	re := regexp.MustCompile(".*/([0-9a-fA-F]{64}).*")

	// volumesMap[id] = weight
	// weight = 0 ~ 99, increased on every iteration while the volume is unused
	// weight = 100, remove it
	volumesMap := make(map[string]int)
	volumeDir := path.Join(*pDockerRootDir, "volumes")
	for {
		containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
		if err != nil {
			log.Println("Vol Cleanup: cannot get container list", err)
			time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
			continue
		} else {
			inspect_error := false
			for _, container := range containers {
				containerInspect, err := client.InspectContainer(container.ID)
				if err != nil {
					inspect_error = true
					log.Println("Vol Cleanup: cannot get container inspect", err)
					break
				}
				for _, volPath := range containerInspect.Volumes {
					terms := re.FindStringSubmatch(volPath)
					if len(terms) == 2 {
						id := terms[1]
						volumesMap[id] = 0
					}
				}
			}
			if inspect_error {
				time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
				continue
			}
		}

		files, err := ioutil.ReadDir(volumeDir)
		if err != nil {
			log.Printf("Vol Cleanup: %s", err)
		} else {
			for _, f := range files {
				id := f.Name()
				weight := volumesMap[id]
				volumesMap[id] = weight + 1
			}
		}

		// Remove the unused volumes
		counter := 0
		for id, weight := range volumesMap {
			if weight >= 100 {
				volPath := path.Join(volumeDir, id)
				log.Printf("Vol Cleanup: removing volume %s", volPath)
				err := os.RemoveAll(volPath)
				if err != nil {
					log.Printf("Vol Cleanup: %s", err)
				} else {
					delete(volumesMap, id)
					counter += 1
				}
			}
		}
		log.Printf("Vol Cleanup: %d volumes have been removed", counter)

		// Sleep
		log.Printf("Vol Cleanup: next cleanup will be start in %d seconds", *pVolumeCleanInterval)
		time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
	}
}
Code example #19
File: router_test.go Project: ncdc/origin
// createAndStartRouterContainer is responsible for deploying the router image in docker. It assumes that all router
// images accept a --master command-line flag pointing at the master URL.
func createAndStartRouterContainer(dockerCli *dockerClient.Client, masterIp string, routerStatsPort int, reloadInterval int) (containerId string, err error) {
	ports := []string{"80", "443"}
	if routerStatsPort > 0 {
		ports = append(ports, fmt.Sprintf("%d", routerStatsPort))
	}

	portBindings := make(map[dockerClient.Port][]dockerClient.PortBinding)
	exposedPorts := map[dockerClient.Port]struct{}{}

	for _, p := range ports {
		dockerPort := dockerClient.Port(p + "/tcp")

		portBindings[dockerPort] = []dockerClient.PortBinding{
			{
				HostPort: p,
			},
		}

		exposedPorts[dockerPort] = struct{}{}
	}

	copyEnv := []string{
		"ROUTER_EXTERNAL_HOST_HOSTNAME",
		"ROUTER_EXTERNAL_HOST_USERNAME",
		"ROUTER_EXTERNAL_HOST_PASSWORD",
		"ROUTER_EXTERNAL_HOST_HTTP_VSERVER",
		"ROUTER_EXTERNAL_HOST_HTTPS_VSERVER",
		"ROUTER_EXTERNAL_HOST_INSECURE",
		"ROUTER_EXTERNAL_HOST_PRIVKEY",
	}

	env := []string{
		fmt.Sprintf("STATS_PORT=%d", routerStatsPort),
		fmt.Sprintf("STATS_USERNAME=%s", statsUser),
		fmt.Sprintf("STATS_PASSWORD=%s", statsPassword),
		fmt.Sprintf("DEFAULT_CERTIFICATE=%s", defaultCert),
	}

	reloadIntVar := fmt.Sprintf("RELOAD_INTERVAL=%ds", reloadInterval)
	env = append(env, reloadIntVar)

	for _, name := range copyEnv {
		val := os.Getenv(name)
		if len(val) > 0 {
			env = append(env, name+"="+val)
		}
	}

	vols := ""
	hostVols := []string{}

	privkeyFilename := os.Getenv("ROUTER_EXTERNAL_HOST_PRIVKEY")
	if len(privkeyFilename) != 0 {
		vols = privkeyFilename
		privkeyBindmount := fmt.Sprintf("%[1]s:%[1]s", privkeyFilename)
		hostVols = append(hostVols, privkeyBindmount)
	}

	binary := os.Getenv("ROUTER_OPENSHIFT_BINARY")
	if len(binary) != 0 {
		hostVols = append(hostVols, fmt.Sprintf("%[1]s:/usr/bin/openshift", binary))
	}

	containerOpts := dockerClient.CreateContainerOptions{
		Config: &dockerClient.Config{
			Image:        getRouterImage(),
			Cmd:          []string{"--master=" + masterIp, "--loglevel=4"},
			Env:          env,
			ExposedPorts: exposedPorts,
			VolumesFrom:  vols,
		},
		HostConfig: &dockerClient.HostConfig{
			Binds: hostVols,
		},
	}

	container, err := dockerCli.CreateContainer(containerOpts)

	if err != nil {
		return "", err
	}

	dockerHostCfg := &dockerClient.HostConfig{NetworkMode: "host", PortBindings: portBindings}
	err = dockerCli.StartContainer(container.ID, dockerHostCfg)

	if err != nil {
		return "", err
	}

	//wait for it to start
	if err := wait.Poll(time.Millisecond*100, time.Second*30, func() (bool, error) {
		c, err := dockerCli.InspectContainer(container.ID)
		if err != nil {
			return false, err
		}
		return c.State.Running, nil
	}); err != nil {
		return "", err
	}
	return container.ID, nil
}
Code example #20
File: docker_client.go Project: rmoorman/docker-gen
func getContainers(client *docker.Client) ([]*RuntimeContainer, error) {

	apiContainers, err := client.ListContainers(docker.ListContainersOptions{
		All:  false,
		Size: false,
	})
	if err != nil {
		return nil, err
	}

	containers := []*RuntimeContainer{}
	for _, apiContainer := range apiContainers {
		container, err := client.InspectContainer(apiContainer.ID)
		if err != nil {
			log.Printf("error inspecting container: %s: %s\n", apiContainer.ID, err)
			continue
		}

		registry, repository, tag := splitDockerImage(container.Config.Image)
		runtimeContainer := &RuntimeContainer{
			ID: container.ID,
			Image: DockerImage{
				Registry:   registry,
				Repository: repository,
				Tag:        tag,
			},
			Name:         strings.TrimLeft(container.Name, "/"),
			Hostname:     container.Config.Hostname,
			Gateway:      container.NetworkSettings.Gateway,
			Addresses:    []Address{},
			Env:          make(map[string]string),
			Volumes:      make(map[string]Volume),
			Node:         SwarmNode{},
			Labels:       make(map[string]string),
			IP:           container.NetworkSettings.IPAddress,
			IP6LinkLocal: container.NetworkSettings.LinkLocalIPv6Address,
			IP6Global:    container.NetworkSettings.GlobalIPv6Address,
		}
		for k, v := range container.NetworkSettings.Ports {
			address := Address{
				IP:           container.NetworkSettings.IPAddress,
				IP6LinkLocal: container.NetworkSettings.LinkLocalIPv6Address,
				IP6Global:    container.NetworkSettings.GlobalIPv6Address,
				Port:         k.Port(),
				Proto:        k.Proto(),
			}
			if len(v) > 0 {
				address.HostPort = v[0].HostPort
				address.HostIP = v[0].HostIP
			}
			runtimeContainer.Addresses = append(runtimeContainer.Addresses,
				address)

		}
		for k, v := range container.Volumes {
			runtimeContainer.Volumes[k] = Volume{
				Path:      k,
				HostPath:  v,
				ReadWrite: container.VolumesRW[k],
			}
		}
		if container.Node != nil {
			runtimeContainer.Node.ID = container.Node.ID
			runtimeContainer.Node.Name = container.Node.Name
			runtimeContainer.Node.Address = Address{
				IP: container.Node.IP,
			}
		}

		runtimeContainer.Env = splitKeyValueSlice(container.Config.Env)
		runtimeContainer.Labels = container.Config.Labels
		containers = append(containers, runtimeContainer)
	}
	return containers, nil

}
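splitKeyValueSlice is referenced here but not shown. It presumably turns KEY=VALUE pairs into a map; a minimal sketch under that assumption:

func splitKeyValueSlice(entries []string) map[string]string {
	env := make(map[string]string)
	for _, entry := range entries {
		// SplitN keeps '=' characters inside the value intact.
		if kv := strings.SplitN(entry, "=", 2); len(kv) == 2 {
			env[kv[0]] = kv[1]
		}
	}
	return env
}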
Code example #21
File: cleanup.go Project: gloppenhosting/cleanup
func cleanImages(client *docker.Client) {
	defer wg.Done()

	log.Printf("Img Cleanup: the following images will be locked: %s", *pImageLocked)
	log.Println("Img Cleanup: starting image cleanup ...")
	for {
		// imageIdMap[imageID] = isRemovable
		imageIdMap := make(map[string]bool)

		// Get the image ID list before the cleanup
		images, err := client.ListImages(docker.ListImagesOptions{All: false})
		if err != nil {
			log.Println("Img Cleanup: cannot get images list", err)
			time.Sleep(time.Duration(*pImageCleanInterval+*pImageCleanDelayed) * time.Second)
			continue
		}

		for _, image := range images {
			imageIdMap[image.ID] = true
		}

		// Get the image IDs used by all the containers
		containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
		if err != nil {
			log.Println("Img Cleanup: cannot get container list", err)
			time.Sleep(time.Duration(*pImageCleanInterval+*pImageCleanDelayed) * time.Second)
			continue
		} else {
			inspect_error := false
			for _, container := range containers {
				containerInspect, err := client.InspectContainer(container.ID)
				if err != nil {
					inspect_error = true
					log.Println("Img Cleanup: cannot get container inspect", err)
					break
				}
				delete(imageIdMap, containerInspect.Image)
			}
			if inspect_error {
				time.Sleep(time.Duration(*pImageCleanInterval+*pImageCleanDelayed) * time.Second)
				continue
			}
		}

		// Get all the locked image ID
		if *pImageLocked != "" {
			lockedImages := strings.Split(*pImageLocked, ",")
			for _, lockedImage := range lockedImages {
				imageInspect, err := client.InspectImage(strings.Trim(lockedImage, " "))
				if err == nil {
					delete(imageIdMap, imageInspect.ID)
				}

			}
		}

		// Sleep for the delay time
		log.Printf("Img Cleanup: wait %d seconds for the cleaning", *pImageCleanDelayed)
		time.Sleep(time.Duration(*pImageCleanDelayed) * time.Second)

		// Get the image IDs used by all the containers again after the delay time
		containersDelayed, err := client.ListContainers(docker.ListContainersOptions{All: true})
		if err != nil {
			log.Println("Img Cleanup: cannot get container list", err)
			time.Sleep(time.Duration(*pImageCleanInterval) * time.Second)
			continue
		} else {
			inspect_error := false
			for _, container := range containersDelayed {
				containerInspect, err := client.InspectContainer(container.ID)
				if err != nil {
					inspect_error = true
					log.Println("Img Cleanup: cannot get container inspect", err)
					break
				}
				delete(imageIdMap, containerInspect.Image)
			}
			if inspect_error {
				time.Sleep(time.Duration(*pImageCleanInterval) * time.Second)
				continue
			}
		}

		// Remove the unused images
		counter := 0
		for id, removable := range imageIdMap {
			if removable {
				log.Printf("Img Cleanup: removing image %s", id)
				err := client.RemoveImage(id)
				if err != nil {
					log.Printf("Img Cleanup: %s", err)
				}
				counter += 1
			}
		}
		log.Printf("Img Cleanup: %d images have been removed", counter)

		// Sleep again
		log.Printf("Img Cleanup: next cleanup will be start in %d seconds", *pImageCleanInterval)
		time.Sleep(time.Duration(*pImageCleanInterval) * time.Second)
	}
}
Code example #22
File: cleanup.go Project: gloppenhosting/cleanup
func cleanVolumesDocker118(client *docker.Client, apiVersion string) {
	defer wg.Done()

	log.Printf("Vol Cleanup: starting volume cleanup(ver %s) ...", apiVersion)

	// volumesMap[volPath] = weight
	// weight = 0 ~ 99, increased on every iteration while the volume is unused
	// weight = 100, remove it
	volumesMap := make(map[string]int)
	volumeDir1 := path.Join(*pDockerRootDir, "vfs/dir")
	volumeDir2 := path.Join(*pDockerRootDir, "volumes")
	for {
		containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
		if err != nil {
			log.Println("Vol Cleanup: cannot get container list", err)
			time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
			continue
		} else {
			inspect_error := false
			for _, container := range containers {
				containerInspect, err := client.InspectContainer(container.ID)
				if err != nil {
					inspect_error = true
					log.Println("Vol Cleanup: cannot get container inspect", err)
					break
				}
				for _, volPath := range containerInspect.Volumes {
					volumesMap[volPath] = 0
					if strings.Contains(volPath, "docker/vfs/dir") {
						volPath2 := strings.Replace(volPath, "vfs/dir", "volumes", 1)
						volumesMap[volPath2] = 0
					}
				}
			}
			if inspect_error {
				time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
				continue
			}
		}

		files, err := ioutil.ReadDir(volumeDir1)
		if err != nil {
			log.Printf("Vol Cleanup: %s", err)
		} else {
			for _, f := range files {
				volPath := path.Join(volumeDir1, f.Name())
				weight := volumesMap[volPath]
				volumesMap[volPath] = weight + 1
			}
		}

		files, err = ioutil.ReadDir(volumeDir2)
		if err != nil {
			log.Printf("Vol Cleanup: %s", err)
		} else {
			for _, f := range files {
				volPath := path.Join(volumeDir2, f.Name())
				weight := volumesMap[volPath]
				volumesMap[volPath] = weight + 1
			}
		}

		// Remove the unused volumes
		counter := 0
		for volPath, weight := range volumesMap {
			if weight == 100 {
				log.Printf("Vol Cleanup: removing volume %s", volPath)
				err := os.RemoveAll(volPath)
				if err != nil {
					log.Printf("Vol Cleanup: %s", err)
				}
				delete(volumesMap, volPath)
				counter += 1
			}
		}
		log.Printf("Vol Cleanup: %d volumes have been removed", counter)

		// Sleep
		log.Printf("Vol Cleanup: next cleanup will be start in %d seconds", *pVolumeCleanInterval)
		time.Sleep(time.Duration(*pVolumeCleanInterval) * time.Second)
	}
}
Code example #23
File: router_test.go Project: erinboyd/origin
// createAndStartRouterContainer is responsible for deploying the router image in docker. It assumes that all router
// images accept a --master command-line flag pointing at the master URL.
func createAndStartRouterContainer(dockerCli *dockerClient.Client, masterIp string, routerStatsPort int) (containerId string, err error) {
	ports := []string{"80", "443"}
	if routerStatsPort > 0 {
		ports = append(ports, fmt.Sprintf("%d", routerStatsPort))
	}

	portBindings := make(map[dockerClient.Port][]dockerClient.PortBinding)
	exposedPorts := map[dockerClient.Port]struct{}{}

	for _, p := range ports {
		dockerPort := dockerClient.Port(p + "/tcp")

		portBindings[dockerPort] = []dockerClient.PortBinding{
			{
				HostPort: p,
			},
		}

		exposedPorts[dockerPort] = struct{}{}
	}

	copyEnv := []string{
		"ROUTER_EXTERNAL_HOST_HOSTNAME",
		"ROUTER_EXTERNAL_HOST_USERNAME",
		"ROUTER_EXTERNAL_HOST_PASSWORD",
		"ROUTER_EXTERNAL_HOST_HTTP_VSERVER",
		"ROUTER_EXTERNAL_HOST_HTTPS_VSERVER",
		"ROUTER_EXTERNAL_HOST_INSECURE",
		"ROUTER_EXTERNAL_HOST_PRIVKEY",
	}

	env := []string{
		fmt.Sprintf("STATS_PORT=%d", routerStatsPort),
		fmt.Sprintf("STATS_USERNAME=%s", statsUser),
		fmt.Sprintf("STATS_PASSWORD=%s", statsPassword),
	}

	for _, name := range copyEnv {
		val := os.Getenv(name)
		if len(val) > 0 {
			env = append(env, name+"="+val)
		}
	}

	vols := ""
	hostVols := []string{}

	privkeyFilename := os.Getenv("ROUTER_EXTERNAL_HOST_PRIVKEY")
	if len(privkeyFilename) != 0 {
		vols = privkeyFilename
		privkeyBindmount := fmt.Sprintf("%[1]s:%[1]s", privkeyFilename)
		hostVols = append(hostVols, privkeyBindmount)
	}

	binary := os.Getenv("ROUTER_OPENSHIFT_BINARY")
	if len(binary) != 0 {
		hostVols = append(hostVols, fmt.Sprintf("%[1]s:/usr/bin/openshift", binary))
	}

	containerOpts := dockerClient.CreateContainerOptions{
		Config: &dockerClient.Config{
			Image:        getRouterImage(),
			Cmd:          []string{"--master=" + masterIp, "--loglevel=4"},
			Env:          env,
			ExposedPorts: exposedPorts,
			VolumesFrom:  vols,
		},
		HostConfig: &dockerClient.HostConfig{
			Binds: hostVols,
		},
	}

	container, err := dockerCli.CreateContainer(containerOpts)

	if err != nil {
		return "", err
	}

	dockerHostCfg := &dockerClient.HostConfig{NetworkMode: "host", PortBindings: portBindings}
	err = dockerCli.StartContainer(container.ID, dockerHostCfg)

	if err != nil {
		return "", err
	}

	running := false

	//wait for it to start
	for i := 0; i < dockerRetries; i++ {
		time.Sleep(time.Second * dockerWaitSeconds)

		c, err := dockerCli.InspectContainer(container.ID)

		if err != nil {
			return "", err
		}

		if c.State.Running {
			running = true
			break
		}
	}

	if !running {
		return "", errors.New("Container did not start after 3 tries!")
	}

	return container.ID, nil
}
Code example #24
File: nested.go Project: raceli/resolvable
func newDockerInDockerDaemon(rootClient *dockerapi.Client, endpoint *url.URL, clientInit ClientInit) (*DockerDaemon, error) {
	var err error

	dockerInDocker := &DockerInDocker{
		client: rootClient,
	}
	daemon := &DockerDaemon{
		Close: dockerInDocker.Close,
	}
	defer func() {
		// if there is an error, client will not be set, so clean up
		if daemon.Client == nil {
			daemon.Close()
		}
	}()

	port := dockerapi.Port("4444/tcp")

	dockerInDocker.containerId, err = runContainer(rootClient,
		dockerapi.CreateContainerOptions{
			Config: &dockerapi.Config{
				Image:        "jpetazzo/dind",
				Env:          []string{"PORT=" + port.Port()},
				ExposedPorts: map[dockerapi.Port]struct{}{port: {}},
			},
		}, &dockerapi.HostConfig{
			Privileged:      true,
			PublishAllPorts: true,
		},
	)
	if err != nil {
		return nil, err
	}

	container, err := rootClient.InspectContainer(dockerInDocker.containerId)
	if err != nil {
		return nil, err
	}

	var hostAddr, hostPort string

	if endpoint.Scheme == "unix" {
		hostAddr = container.NetworkSettings.IPAddress
		hostPort = port.Port()
	} else {
		portBinding := container.NetworkSettings.Ports[port][0]
		hostAddr, _, err = net.SplitHostPort(endpoint.Host)
		if err != nil {
			return nil, err
		}
		hostPort = portBinding.HostPort
	}

	dindEndpoint := fmt.Sprintf("tcp://%v:%v", hostAddr, hostPort)
	client, err := dockerapi.NewClient(dindEndpoint)
	if err != nil {
		return nil, err
	}

	b := backoff.NewExponentialBackOff()
	// retry a bit faster than the defaults
	b.InitialInterval = time.Second / 10
	b.Multiplier = 1.1
	b.RandomizationFactor = 0.2
	// don't need to wait a full minute to timeout
	b.MaxElapsedTime = 30 * time.Second

	if err = backoff.Retry(func() error { return clientInit(client) }, b); err != nil {
		return nil, err
	}

	daemon.Client = client
	return daemon, err
}
Code example #25
// TriggerRefresh refreshes the logstash-forwarder configuration and restarts it.
func TriggerRefresh(client *docker.Client, logstashEndpoint string, configFile string, quiet bool) {
	defer utils.TimeTrack(time.Now(), "Config generation")

	log.Debug("Generating configuration...")
	forwarderConfig := getConfig(logstashEndpoint, configFile)

	containers, err := client.ListContainers(docker.ListContainersOptions{All: false})
	if err != nil {
		log.Fatalf("Unable to retrieve container list from docker: %s", err)
	}

	log.Debug("Found %d containers:", len(containers))
	for i, c := range containers {
		log.Debug("%d. %s", i+1, c.ID)

		container, err := client.InspectContainer(c.ID)
		if err != nil {
			log.Fatalf("Unable to inspect container %s: %s", c.ID, err)
		}

		forwarderConfig.AddContainerLogFile(container)

		containerConfig, err := config.NewFromContainer(container)
		if err != nil {
			if !os.IsNotExist(err) {
				log.Error("Unable to look for logstash-forwarder config in %s: %s", container.ID, err)
			}
		} else {
			for _, file := range containerConfig.Files {
				file.Fields["host"] = container.Config.Hostname
				forwarderConfig.Files = append(forwarderConfig.Files, file)
			}
		}
	}

	const configPath = "/tmp/logstash-forwarder.conf"
	fo, err := os.Create(configPath)
	if err != nil {
		log.Fatalf("Unable to open %s: %s", configPath, err)
	}
	defer fo.Close()

	j, err := json.MarshalIndent(forwarderConfig, "", "  ")
	if err != nil {
		log.Debug("Unable to MarshalIndent logstash-forwarder config: %s", err)
	}
	_, err = fo.Write(j)
	if err != nil {
		log.Fatalf("Unable to write logstash-forwarder config to %s: %s", configPath, err)
	}
	log.Info("Wrote logstash-forwarder config to %s", configPath)

	if running {
		log.Info("Waiting for logstash-forwarder to stop")
		// perhaps use SIGTERM first instead of just Kill()?
		//		if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
		if err := cmd.Process.Kill(); err != nil {
			log.Error("Unable to stop logstash-forwarder")
		}
		if _, err := cmd.Process.Wait(); err != nil {
			log.Error("Unable to wait for logstash-forwarder to stop: %s", err)
		}
		log.Info("Stopped logstash-forwarder")
	}
	cmd = exec.Command("logstash-forwarder", "-config", configPath, fmt.Sprintf("-quiet=%t", quiet))
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr

	if err := cmd.Start(); err != nil {
		log.Fatalf("Unable to start logstash-forwarder: %s", err)
	}
	running = true
	log.Info("Starting logstash-forwarder...")
}
Code example #26
File: handler.go Project: sergiula/cadvisor
func newDockerContainerHandler(
	client *docker.Client,
	name string,
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	storageDriver storageDriver,
	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
	inHostNamespace bool,
	metadataEnvs []string,
) (container.ContainerHandler, error) {
	// Create the cgroup paths.
	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
	for key, val := range cgroupSubsystems.MountPoints {
		cgroupPaths[key] = path.Join(val, name)
	}

	// Generate the equivalent cgroup manager for this container.
	cgroupManager := &cgroupfs.Manager{
		Cgroups: &libcontainerconfigs.Cgroup{
			Name: name,
		},
		Paths: cgroupPaths,
	}

	rootFs := "/"
	if !inHostNamespace {
		rootFs = "/rootfs"
	}

	id := ContainerNameToDockerId(name)

	// Add the Containers dir where the log files are stored.
	storageDirs := []string{path.Join(*dockerRootDir, pathToContainersDir, id)}

	switch storageDriver {
	case aufsStorageDriver:
		// Add writable layer for aufs.
		storageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id))
	case overlayStorageDriver:
		storageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToOverlayDir, id))
	}

	handler := &dockerContainerHandler{
		id:                 id,
		client:             client,
		name:               name,
		machineInfoFactory: machineInfoFactory,
		cgroupPaths:        cgroupPaths,
		cgroupManager:      cgroupManager,
		storageDriver:      storageDriver,
		fsInfo:             fsInfo,
		rootFs:             rootFs,
		storageDirs:        storageDirs,
		fsHandler:          newFsHandler(time.Minute, storageDirs, fsInfo),
		envs:               make(map[string]string), // initialized so the metadata loop below can write to it
	}

	// We assume that if Inspect fails then the container is not known to docker.
	ctnr, err := client.InspectContainer(id)
	if err != nil {
		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
	}
	handler.creationTime = ctnr.Created
	handler.pid = ctnr.State.Pid

	// Add the name and bare ID as aliases of the container.
	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"), id)
	handler.labels = ctnr.Config.Labels
	handler.image = ctnr.Config.Image
	handler.networkMode = ctnr.HostConfig.NetworkMode

	// split env vars to get metadata map.
	for _, exposedEnv := range metadataEnvs {
		for _, envVar := range ctnr.Config.Env {
			splits := strings.SplitN(envVar, "=", 2)
			if splits[0] == exposedEnv {
				handler.envs[strings.ToLower(exposedEnv)] = splits[1]
			}
		}
	}

	return handler, nil
}
Code example #27
// StartDockerContainer starts a new Lever container for the specified
// environment and service.
func StartDockerContainer(
	docker *dockerapi.Client, environment string, service string,
	instanceID string, codeVersion int64, isAdmin bool,
	leverConfig *core.LeverConfig) (
	containerID string, node string, err error) {
	codeDir := HostCodeDirPath(environment, service, codeVersion)
	binds := []string{codeDir + ":/leveros/custcode:ro,Z"}
	env := []string{
		"LEVEROS_ENVIRONMENT=" + environment,
		"LEVEROS_SERVICE=" + service,
		"LEVEROS_INSTANCE_ID=" + instanceID,
		"LEVEROS_CODE_VERSION=" + strconv.Itoa(int(codeVersion)),
		"LEVEROS_INTERNAL_ENV_SUFFIX=" +
			core.InternalEnvironmentSuffixFlag.Get(),
	}
	if isAdmin {
		// This is used by admin to make deployments.
		binds = append(
			binds, LeverCodeHostDirFlag.Get()+":/leveros/custcodetree:Z")
	}

	// Configure logging.
	var logConfig dockerapi.LogConfig
	if devlogger.DisableFlag.Get() {
		logConfig.Type = "none"
	} else {
		// TODO: Should use scale.Dereference... to get IP of syslog server
		//       and shard by env+service.
		tag := fmt.Sprintf(
			"%s/%s/%d/%s", environment, service, codeVersion, instanceID)
		logConfig.Type = "syslog"
		logConfig.Config = map[string]string{
			"syslog-address":  "tcp://127.0.0.1:6514",
			"syslog-facility": "user",
			"tag":             tag,
			"syslog-format":   "rfc5424",
		}
	}

	memoryBytes := int64(leverConfig.InstanceMemoryMB) * 1000 * 1000
	memAndSwapBytes := memoryBytes     // No swap.
	kernelMemBytes := memoryBytes / 10 // Allow 10% memory for kernel.

	// Entry point.
	entry := leverConfig.EntryPoint
	if leverConfig.JSEntryPoint != "" {
		// Trigger GC in node when garbage reaches 90% of memory.
		maxGarbage := strconv.Itoa(
			int(float32(leverConfig.InstanceMemoryMB) * 0.9))
		// Set entry point for node.
		entry = []string{
			"node", "--optimize_for_size", "--max_old_space_size=" + maxGarbage,
			"--gc_interval=100",
			"/leveros/js/leveros-server/compiled/lib/serve.js",
			leverConfig.JSEntryPoint,
		}
	}

	container, err := docker.CreateContainer(dockerapi.CreateContainerOptions{
		Name: "leveros_" + instanceID,
		Config: &dockerapi.Config{
			Image:        "leveros/levercontainer:latest",
			Cmd:          entry,
			Env:          env,
			KernelMemory: kernelMemBytes,
			Labels: map[string]string{
				"com.leveros.environment": environment,
				"com.leveros.service":     service,
				"com.leveros.instanceid":  instanceID,
				"com.leveros.codeversion": strconv.Itoa(int(codeVersion)),
			},
		},
		// TODO: Documentation for these here:
		//       https://docs.docker.com/engine/reference/api/docker_remote_api_v1.23/#create-a-container
		// TODO: Also check if need to set limits on IO operations (blkio).
		// TODO: Should allow to write to disk, but limit amount of disk space.
		HostConfig: &dockerapi.HostConfig{
			ReadonlyRootfs:   true,
			Binds:            binds,
			CapDrop:          []string{"all"},
			NetworkMode:      "none",
			Ulimits:          []dockerapi.ULimit{},          // TODO
			SecurityOpt:      []string{"no-new-privileges"}, // TODO
			LogConfig:        logConfig,
			Memory:           memoryBytes,
			MemorySwap:       memAndSwapBytes,
			MemorySwappiness: 0,
			CPUShares:        0, // TODO
			CPUPeriod:        0, // TODO
			CPUQuota:         0, // TODO
		},
	})
	if err != nil {
		logger.WithFields("err", err).Debug(
			"Error trying to create container")
		return "", "", err
	}

	// Get info about the node it was allocated to by Docker Swarm.
	container, err = docker.InspectContainer(container.ID)
	if err != nil {
		removeErr := RemoveDockerContainer(docker, container.ID)
		if removeErr != nil {
			logger.WithFields(
				"containerID", containerID,
				"err", removeErr,
			).Error("Error trying to remove container after previous error")
		}
		return "", "", err
	}
	if container.Node != nil {
		node = container.Node.Name
	} else {
		// In a dev/testing (non-swarm) environment.
		logger.Warning(
			"Using non-swarm node. " +
				"YOU SHOULD NEVER SEE THIS IN PRODUCTION.")
		node = "leverosconsul"
	}

	// Start the container.
	err = docker.StartContainer(container.ID, nil)
	if err != nil {
		removeErr := RemoveDockerContainer(docker, container.ID)
		if removeErr != nil {
			logger.WithFields(
				"containerID", containerID,
				"err", removeErr,
			).Error("Error trying to remove container after failed to start")
		}
		return "", "", err
	}

	// Need to disconnect it from the "none" network before being able to
	// connect it to its local environment network.
	err = DisconnectFromDockerEnvNetwork(docker, container.ID, "none")
	if err != nil {
		removeErr := RemoveDockerContainer(docker, container.ID)
		if removeErr != nil {
			logger.WithFields(
				"containerID", containerID,
				"err", removeErr,
			).Error("Error trying to remove container after previous error")
		}
		return "", "", err
	}

	return container.ID, node, nil
}
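RemoveDockerContainer is used for cleanup above but not shown. Under the assumption that it simply forces removal through the same client bindings, a sketch could be:

func RemoveDockerContainer(docker *dockerapi.Client, containerID string) error {
	return docker.RemoveContainer(dockerapi.RemoveContainerOptions{
		ID:    containerID,
		Force: true, // stop the container first if it is still running
	})
}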
Code example #28
File: handler.go Project: Clarifai/kubernetes
func newDockerContainerHandler(
	client *docker.Client,
	name string,
	machineInfoFactory info.MachineInfoFactory,
	fsInfo fs.FsInfo,
	storageDriver storageDriver,
	storageDir string,
	cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
	inHostNamespace bool,
	metadataEnvs []string,
	dockerVersion []int,
	ignoreMetrics container.MetricSet,
) (container.ContainerHandler, error) {
	// Create the cgroup paths.
	cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
	for key, val := range cgroupSubsystems.MountPoints {
		cgroupPaths[key] = path.Join(val, name)
	}

	// Generate the equivalent cgroup manager for this container.
	cgroupManager := &cgroupfs.Manager{
		Cgroups: &libcontainerconfigs.Cgroup{
			Name: name,
		},
		Paths: cgroupPaths,
	}

	rootFs := "/"
	if !inHostNamespace {
		rootFs = "/rootfs"
		storageDir = path.Join(rootFs, storageDir)
	}

	id := ContainerNameToDockerId(name)

	// Add the Containers dir where the log files are stored.
	// FIXME: Give `otherStorageDir` a more descriptive name.
	otherStorageDir := path.Join(storageDir, pathToContainersDir, id)

	rwLayerID, err := getRwLayerID(id, storageDir, storageDriver, dockerVersion)
	if err != nil {
		return nil, err
	}
	var rootfsStorageDir string
	switch storageDriver {
	case aufsStorageDriver:
		rootfsStorageDir = path.Join(storageDir, string(aufsStorageDriver), aufsRWLayer, rwLayerID)
	case overlayStorageDriver:
		rootfsStorageDir = path.Join(storageDir, string(overlayStorageDriver), rwLayerID)
	}

	handler := &dockerContainerHandler{
		id:                 id,
		client:             client,
		name:               name,
		machineInfoFactory: machineInfoFactory,
		cgroupPaths:        cgroupPaths,
		cgroupManager:      cgroupManager,
		storageDriver:      storageDriver,
		fsInfo:             fsInfo,
		rootFs:             rootFs,
		rootfsStorageDir:   rootfsStorageDir,
		envs:               make(map[string]string),
		ignoreMetrics:      ignoreMetrics,
	}

	if !ignoreMetrics.Has(container.DiskUsageMetrics) {
		handler.fsHandler = common.NewFsHandler(time.Minute, rootfsStorageDir, otherStorageDir, fsInfo)
	}

	// We assume that if Inspect fails then the container is not known to docker.
	ctnr, err := client.InspectContainer(id)
	if err != nil {
		return nil, fmt.Errorf("failed to inspect container %q: %v", id, err)
	}
	handler.creationTime = ctnr.Created
	handler.pid = ctnr.State.Pid

	// Add the name and bare ID as aliases of the container.
	handler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, "/"), id)
	handler.labels = ctnr.Config.Labels
	handler.image = ctnr.Config.Image
	handler.networkMode = ctnr.HostConfig.NetworkMode

	// split env vars to get metadata map.
	for _, exposedEnv := range metadataEnvs {
		for _, envVar := range ctnr.Config.Env {
			splits := strings.SplitN(envVar, "=", 2)
			if splits[0] == exposedEnv {
				handler.envs[strings.ToLower(exposedEnv)] = splits[1]
			}
		}
	}

	return handler, nil
}
Code example #29
File: monsipp.go Project: mangalaman93/nfs
func dockerListener(docker *dockerclient.Client, dokchan chan *dockerclient.APIEvents, done chan bool) {
	log.Println("[INFO] listening to docker container events")
	defer func() { done <- true }()

	influxchan := make(chan *influxdb.Point, NET_BUFFER_SIZE)
	donechan := make(chan bool, 1)
	go bgWrite(influxchan, donechan)
	defer func() {
		influxchan <- nil
		<-donechan
	}()

	defer cleanupTails()
	for {
		event := <-dokchan
		log.Println("[INFO] event occurred: ", event)

		switch event.Status {
		case "EOF":
			return

		case "start":
			if _, ok := sippvols[event.ID]; ok {
				log.Println("[WARN] duplicate event for container ", event.ID)
				continue
			}

			if event.From == IMAGE_SIPP {
				cont, err := docker.InspectContainer(event.ID)
				if err != nil {
					log.Println("[WARN] unable to inspect container ", event.ID)
					continue
				}

				volume := cont.Volumes[PATH_VOL_SIPP]
				if volume == "" {
					for _, v := range cont.Mounts {
						if v.Destination == PATH_VOL_SIPP {
							volume = v.Source
							break
						}
					}
				}

				if volume == "" {
					log.Println("[WARN] unable to find volume for container ", event.ID)
					continue
				}

				tails := &Tails{
					contname: cont.Name[1:],
					vol:      volume,
					stopchan: make(chan bool, 1),
					waitchan: make(chan bool, 1),
				}

				sippvols[event.ID] = tails
				go tails.TailVolume(influxchan)
				log.Printf("[INFO] add volume %s to map for container %s\n", volume, event.ID)
			}

		case "die", "kill", "stop":
			if tails, ok := sippvols[event.ID]; ok {
				tails.StopTail()
				delete(sippvols, event.ID)
				log.Printf("[INFO] delete volume %s from map for container %s\n", tails.vol, event.ID)
			}
		}
	}
}