Example #1
func Handlews2(ws *websocket.Conn) {
	var stats types.Stats
	m := NewMonMan()
	var c []Containers
	var oc []Containers

	cli, err := client.NewEnvClient()
	if err != nil {
		fmt.Println(err)
		return
	}
	for {
		// Snapshot the previous poll into a fresh slice before rebuilding;
		// copying into a zero-length slice would copy nothing.
		oc = append([]Containers(nil), c...)
		c = c[:0]
		m.GetContainer()
		for n, cont := range m.ID {
			r, err := cli.ContainerStats(context.Background(), cont, false)
			if err != nil {
				fmt.Println(err)
				continue
			}
			b, _ := ioutil.ReadAll(r.Body)
			r.Body.Close()
			json.Unmarshal(b, &stats)
			tmp := Containers{calculateCPUPercent(stats.PreCPUStats, stats.CPUStats), stats.MemoryStats.Usage, m.User[n], 0}
			c = append(c, tmp)
		}
		fc := UpdateList(oc, c)
		fmt.Println("New List: ", c)
		fmt.Println("Old List: ", oc)
		fmt.Println("Final List: ", fc)
		b, _ := json.Marshal(fc)
		ws.Write(b)
		time.Sleep(1000 * time.Millisecond)
	}
}
func NewClient(c chan int) *Client {
	var cli Client
	var err error
	var build types.ImageBuildResponse

	cli.c = c
	cli.Cont = make(map[string]*Container)

	cli.Pcli, err = client.NewEnvClient()
	if err != nil {
		Error.Println(err)
		return nil
	}

	cwd, _ := os.Getwd()
	ctx, err := os.Open(cwd + "/Dockerfile.tar.gz")
	if err != nil {
		Error.Println(err)
		return nil
	}
	defer ctx.Close()
	build, err = cli.Pcli.ImageBuild(context.Background(), ctx, types.ImageBuildOptions{Tags: []string{"leadis_image"}, Remove: true, Context: ctx, SuppressOutput: false})
	if err != nil {
		Error.Println(err)
		return nil
	}

	// Test: read the build output to completion so the build finishes, then close the stream.
	defer build.Body.Close()
	b, _ := ioutil.ReadAll(build.Body)
	fmt.Println(string(b))
	// End Test

	return &cli
}
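The handlers in Example #1 and Example #6 rely on a calculateCPUPercent helper that is not included in this listing. A minimal sketch, assuming the usual delta formula used by docker stats and the types.CPUStats values decoded from the stats response, might look like this:

func calculateCPUPercent(previous, current types.CPUStats) float64 {
	// Change in the container's total CPU time between the two samples.
	cpuDelta := float64(current.CPUUsage.TotalUsage) - float64(previous.CPUUsage.TotalUsage)
	// Change in the host's total CPU time over the same interval.
	systemDelta := float64(current.SystemUsage) - float64(previous.SystemUsage)
	if systemDelta > 0.0 && cpuDelta > 0.0 {
		// Scale by the number of CPUs so one fully busy core reads as 100%.
		return (cpuDelta / systemDelta) * float64(len(current.CPUUsage.PercpuUsage)) * 100.0
	}
	return 0.0
}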
Example #3
func getDaemonDockerInfo() (types.Info, error) {
	// FIXME(vdemeester) should be safe to use as is
	client, err := client.NewEnvClient()
	if err != nil {
		return types.Info{}, err
	}
	return client.Info(context.Background())
}
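NewEnvClient configures the client from the environment (DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and DOCKER_TLS_VERIFY). A minimal, self-contained sketch of the same pattern as Example #3, with explicit cleanup (panics are for brevity only):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Build a client from the DOCKER_* environment variables.
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Ask the daemon for its system-wide information, as getDaemonDockerInfo does above.
	info, err := cli.Info(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(info.ServerVersion)
}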
Example #4
func connectbridgenetwork() {
	//var poller_id,
	//var network_bridge_id string

	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	networks, err := cli.NetworkList(context.Background(), types.NetworkListOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(networks)

	/*  for _, network := range networks {
	        //fmt.Println(networks.Name)
	        if network.Name == "bridge" {
	            network_bridge_id = network.ID
	        }
	    }

	    network_bridge, err := cli.NetworkInspect(context.Background(), network_bridge_id)

	    network_container := network_bridge.Containers

	    for _, container_on_bridge := range network_container {
	      //if strings.Contains(container.Names[0], "cuelk_*-polling*") {
	        fmt.Println(container_on_bridge.Name)
	      //}
	    }

	    containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{})
	    if err != nil {
	        panic(err)
	    }

	    fmt.Println(containers)
	    /*for _, container := range containers {
	        //fmt.Printf("%s %s\n", container.ID[:10], container.Image)
	        if strings.Contains(container.Names[0], "_poller_") {
	            poller_id = container.ID[:10]
	            err = cli.NetworkConnect(context.Background(), network_bridge_id, poller_id, nil)
	            fmt.Println("Attach bridge interface to poller container")
	            if err != nil {
	                panic(err)
	            }
	        } else {
	          fmt.Println("bridge interface already attached to poller container")
	        }
	    }*/
}
// GetDockerEndpoint gets the endpoint information from Docker for the given container and network
func GetDockerEndpoint(container, network string) *network.EndpointSettings {
	os.Setenv("DOCKER_API_VERSION", "1.24")
	os.Setenv("DOCKER_HOST", "http://localhost:5375")
	defer os.Setenv("DOCKER_HOST", "")
	cli, err := dockerclient.NewEnvClient()
	if err != nil {
		panic(err)
	}

	info, err := cli.ContainerInspect(context.Background(), container)
	if err != nil {
		panic(err)
	}

	return info.NetworkSettings.Networks[network]
}
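GetDockerEndpoint pins the API version and host by mutating process-wide environment variables before calling NewEnvClient. A sketch of an alternative, assuming a client package recent enough to provide NewClientWithOpts (the host value is the illustrative endpoint used above):

// Same effect without touching the environment; WithHost and WithVersion
// set the endpoint and API version on this client only.
cli, err := dockerclient.NewClientWithOpts(
	dockerclient.WithHost("http://localhost:5375"),
	dockerclient.WithVersion("1.24"),
)
if err != nil {
	panic(err)
}
defer cli.Close()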
Example #6
func Handlews(ws *websocket.Conn) {
	var stats types.Stats
	var oc []Containers
	// TEST
	// m := NewMonMan()

	cli, err := client.NewEnvClient()
	if err != nil {
		fmt.Println(err)
		return
	}
	options := types.ContainerListOptions{}

	for {
		fmt.Println("maine loop")
		c := []Containers{}
		containers, err := cli.ContainerList(context.Background(), options)
		if err != nil {
			fmt.Println(err)
		}
		fmt.Println("Containers: :", containers)
		for _, cont := range containers {
			fmt.Println("container loop")
			r, err := cli.ContainerStats(context.Background(), cont.ID, false)
			if err != nil {
				fmt.Println(err)
				continue
			}
			b, _ := ioutil.ReadAll(r.Body)
			r.Body.Close()
			json.Unmarshal(b, &stats)
			tmp := Containers{calculateCPUPercent(stats.PreCPUStats, stats.CPUStats), stats.MemoryStats.Usage, cont.ID, 0}
			c = append(c, tmp)
		}
		fmt.Println("New List: ", c)
		fmt.Println("Old List: ", oc)

		tmp2 := c
		UpdateList(oc, c)
		oc = tmp2
		b, _ := json.Marshal(c)
		ws.Write(b)

		// m.GetContainer()
		// NewMonMan()

		time.Sleep(1000 * time.Millisecond)
	}
	// END TEST
}
Example #7
// CreateLocal creates a new local cockroach cluster. The stopper is used to
// gracefully shut down the cluster (e.g. when a signal arrives). The cluster
// must be started before being used and keeps logs in the specified logDir, if
// supplied.
func CreateLocal(
	ctx context.Context, cfg TestConfig, logDir string, privileged bool, stopper *stop.Stopper,
) *LocalCluster {
	select {
	case <-stopper.ShouldStop():
		// The stopper was already closed, exit early.
		os.Exit(1)
	default:
	}

	if *cockroachImage == builderImageFull && !exists(*cockroachBinary) {
		log.Fatalf(ctx, "\"%s\": does not exist", *cockroachBinary)
	}

	cli, err := client.NewEnvClient()
	maybePanic(err)

	retryingClient := retryingDockerClient{
		resilientDockerClient: resilientDockerClient{APIClient: cli},
		attempts:              10,
		timeout:               10 * time.Second,
	}

	clusterID := uuid.MakeV4()
	clusterIDS := clusterID.Short()
	// Only pass a nonzero logDir down to LocalCluster when instructed to keep
	// logs.
	var uniqueLogDir string
	if logDir != "" {
		uniqueLogDir = fmt.Sprintf("%s-%s", logDir, clusterIDS)
	}
	return &LocalCluster{
		clusterID: clusterIDS,
		client:    retryingClient,
		config:    cfg,
		stopper:   stopper,
		// TODO(tschottdorf): deadlocks will occur if these channels fill up.
		events:         make(chan Event, 1000),
		expectedEvents: make(chan Event, 1000),
		logDir:         uniqueLogDir,
		privileged:     privileged,
	}
}
func (s *DockerSuite) TestPostContainersAttach(c *check.C) {
	testRequires(c, DaemonIsLinux)

	expectSuccess := func(conn net.Conn, br *bufio.Reader, stream string, tty bool) {
		defer conn.Close()
		expected := []byte("success")
		_, err := conn.Write(expected)
		c.Assert(err, checker.IsNil)

		conn.SetReadDeadline(time.Now().Add(time.Second))
		lenHeader := 0
		if !tty {
			lenHeader = 8
		}
		actual := make([]byte, len(expected)+lenHeader)
		_, err = io.ReadFull(br, actual)
		c.Assert(err, checker.IsNil)
		if !tty {
			fdMap := map[string]byte{
				"stdin":  0,
				"stdout": 1,
				"stderr": 2,
			}
			c.Assert(actual[0], checker.Equals, fdMap[stream])
		}
		c.Assert(actual[lenHeader:], checker.DeepEquals, expected, check.Commentf("Attach didn't return the expected data from %s", stream))
	}

	expectTimeout := func(conn net.Conn, br *bufio.Reader, stream string) {
		defer conn.Close()
		_, err := conn.Write([]byte{'t'})
		c.Assert(err, checker.IsNil)

		conn.SetReadDeadline(time.Now().Add(time.Second))
		actual := make([]byte, 1)
		_, err = io.ReadFull(br, actual)
		opErr, ok := err.(*net.OpError)
		c.Assert(ok, checker.Equals, true, check.Commentf("Error is expected to be *net.OpError, got %v", err))
		c.Assert(opErr.Timeout(), checker.Equals, true, check.Commentf("Read from %s is expected to timeout", stream))
	}

	// Create a container that only emits stdout.
	cid, _ := dockerCmd(c, "run", "-di", "busybox", "cat")
	cid = strings.TrimSpace(cid)
	// Attach to the container's stdout stream.
	conn, br, err := sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain")
	c.Assert(err, checker.IsNil)
	// Check if the data from stdout can be received.
	expectSuccess(conn, br, "stdout", false)
	// Attach to the container's stderr stream.
	conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain")
	c.Assert(err, checker.IsNil)
	// Since the container only emits stdout, attaching to stderr should return nothing.
	expectTimeout(conn, br, "stdout")

	// Run the same checks against the stderr stream.
	cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2")
	cid = strings.TrimSpace(cid)
	conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain")
	c.Assert(err, checker.IsNil)
	expectSuccess(conn, br, "stderr", false)
	conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain")
	c.Assert(err, checker.IsNil)
	expectTimeout(conn, br, "stderr")

	// Test with tty.
	cid, _ = dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2")
	cid = strings.TrimSpace(cid)
	// Attach to stdout only.
	conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain")
	c.Assert(err, checker.IsNil)
	expectSuccess(conn, br, "stdout", true)

	// Attach without stdout stream.
	conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain")
	c.Assert(err, checker.IsNil)
	// Nothing should be received because both the stdout and stderr of the container will be
	// sent to the client as stdout when tty is enabled.
	expectTimeout(conn, br, "stdout")

	// Test the client API
	// Make sure we don't see "hello" if Logs is false
	client, err := client.NewEnvClient()
	c.Assert(err, checker.IsNil)

	cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "echo hello; cat")
	cid = strings.TrimSpace(cid)

	attachOpts := types.ContainerAttachOptions{
		Stream: true,
		Stdin:  true,
		Stdout: true,
	}

	resp, err := client.ContainerAttach(context.Background(), cid, attachOpts)
	c.Assert(err, checker.IsNil)
	expectSuccess(resp.Conn, resp.Reader, "stdout", false)

	// Make sure we do see "hello" if Logs is true
	attachOpts.Logs = true
	resp, err = client.ContainerAttach(context.Background(), cid, attachOpts)
	c.Assert(err, checker.IsNil)

	defer resp.Conn.Close()
	resp.Conn.SetReadDeadline(time.Now().Add(time.Second))

	_, err = resp.Conn.Write([]byte("success"))
	c.Assert(err, checker.IsNil)

	actualStdout := new(bytes.Buffer)
	actualStderr := new(bytes.Buffer)
	stdcopy.StdCopy(actualStdout, actualStderr, resp.Reader)
	c.Assert(actualStdout.Bytes(), checker.DeepEquals, []byte("hello\nsuccess"), check.Commentf("Attach didn't return the expected data from stdout"))
}
func (d NetworkDriver) CreateEndpoint(request *network.CreateEndpointRequest) (*network.CreateEndpointResponse, error) {
	logutils.JSONMessage("CreateEndpoint", request)

	hostname, err := osutils.GetHostname()
	if err != nil {
		err = errors.Wrap(err, "Hostname fetching error")
		log.Errorln(err)
		return nil, err
	}

	log.Debugf("Creating endpoint %v\n", request.EndpointID)
	if request.Interface.Address == "" {
		err := errors.New("No address assigned for endpoint")
		log.Errorln(err)
		return nil, err
	}

	var addresses []caliconet.IPNet
	if request.Interface.Address != "" {
		// Parse the address this function was passed. Ignore the subnet - Calico always uses /32 (for IPv4)
		ip4, _, err := net.ParseCIDR(request.Interface.Address)
		if err != nil {
			err = errors.Wrapf(err, "Parsing %v as CIDR failed", request.Interface.Address)
			log.Errorln(err)
			return nil, err
		}
		log.Debugf("Parsed IP %v from (%v)\n", ip4, request.Interface.Address)

		addresses = append(addresses, caliconet.IPNet{IPNet: net.IPNet{IP: ip4, Mask: net.CIDRMask(32, 32)}})
	}

	endpoint := api.NewWorkloadEndpoint()
	endpoint.Metadata.Node = hostname
	endpoint.Metadata.Orchestrator = d.orchestratorID
	endpoint.Metadata.Workload = d.containerName
	endpoint.Metadata.Name = request.EndpointID
	endpoint.Spec.InterfaceName = "cali" + request.EndpointID[:mathutils.MinInt(11, len(request.EndpointID))]
	mac, _ := net.ParseMAC(d.fixedMac)
	endpoint.Spec.MAC = &caliconet.MAC{HardwareAddr: mac}
	endpoint.Spec.IPNetworks = append(endpoint.Spec.IPNetworks, addresses...)

	// Use the Docker API to fetch the network name (so we don't have to use an ID everywhere)
	dockerCli, err := dockerClient.NewEnvClient()
	if err != nil {
		err = errors.Wrap(err, "Error while attempting to instantiate docker client from env")
		log.Errorln(err)
		return nil, err
	}
	defer dockerCli.Close()
	networkData, err := dockerCli.NetworkInspect(context.Background(), request.NetworkID)
	if err != nil {
		err = errors.Wrapf(err, "Network %v inspection error", request.NetworkID)
		log.Errorln(err)
		return nil, err
	}

	// Now that we know the network name, set it on the endpoint.
	endpoint.Spec.Profiles = append(endpoint.Spec.Profiles, networkData.Name)

	// If a profile for the network name doesn't exist then it needs to be created.
	// We always attempt to create the profile and rely on the datastore to reject
	// the request if the profile already exists.
	profile := &api.Profile{
		Metadata: api.ProfileMetadata{
			Name: networkData.Name,
			Tags: []string{networkData.Name},
		},
		Spec: api.ProfileSpec{
			EgressRules:  []api.Rule{{Action: "allow"}},
			IngressRules: []api.Rule{{Action: "allow", Source: api.EntityRule{Tag: networkData.Name}}},
		},
	}
	if _, err := d.client.Profiles().Create(profile); err != nil {
		if _, ok := err.(libcalicoErrors.ErrorResourceAlreadyExists); !ok {
			log.Errorln(err)
			return nil, err
		}
	}

	// Create the endpoint last to minimize side-effects if something goes wrong.
	_, err = d.client.WorkloadEndpoints().Create(endpoint)
	if err != nil {
		err = errors.Wrapf(err, "Workload endpoints creation error, data: %+v", endpoint)
		log.Errorln(err)
		return nil, err
	}

	log.Debugf("Workload created, data: %+v\n", endpoint)

	response := &network.CreateEndpointResponse{
		Interface: &network.EndpointInterface{
			MacAddress: string(d.fixedMac),
		},
	}

	logutils.JSONMessage("CreateEndpoint response", response)

	return response, nil
}
Example #10
// Create creates a docker client based on the specified options.
func Create(c Options) (client.APIClient, error) {
	if c.Host == "" {
		if os.Getenv("DOCKER_API_VERSION") == "" {
			os.Setenv("DOCKER_API_VERSION", DefaultAPIVersion)
		}
		client, err := client.NewEnvClient()
		if err != nil {
			return nil, err
		}
		return client, nil
	}

	apiVersion := c.APIVersion
	if apiVersion == "" {
		apiVersion = DefaultAPIVersion
	}

	if c.TLSOptions.CAFile == "" {
		c.TLSOptions.CAFile = filepath.Join(dockerCertPath, defaultCaFile)
	}
	if c.TLSOptions.CertFile == "" {
		c.TLSOptions.CertFile = filepath.Join(dockerCertPath, defaultCertFile)
	}
	if c.TLSOptions.KeyFile == "" {
		c.TLSOptions.KeyFile = filepath.Join(dockerCertPath, defaultKeyFile)
	}
	if c.TrustKey == "" {
		c.TrustKey = filepath.Join(homedir.Get(), ".docker", defaultTrustKeyFile)
	}
	if c.TLSVerify {
		c.TLS = true
	}
	if c.TLS {
		c.TLSOptions.InsecureSkipVerify = !c.TLSVerify
	}

	var httpClient *http.Client
	if c.TLS {
		config, err := tlsconfig.Client(c.TLSOptions)
		if err != nil {
			return nil, err
		}
		tr := &http.Transport{
			TLSClientConfig: config,
		}
		proto, addr, _, err := client.ParseHost(c.Host)
		if err != nil {
			return nil, err
		}

		if err := sockets.ConfigureTransport(tr, proto, addr); err != nil {
			return nil, err
		}

		httpClient = &http.Client{
			Transport: tr,
		}
	}

	customHeaders := map[string]string{}
	customHeaders["User-Agent"] = fmt.Sprintf("Libcompose-Client/%s (%s)", version.VERSION, runtime.GOOS)

	client, err := client.NewClient(c.Host, apiVersion, httpClient, customHeaders)
	if err != nil {
		return nil, err
	}
	return client, nil
}
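A usage sketch for Create, limited to the Options fields that appear in the function above (the daemon address is illustrative):

// Connect to a hypothetical TLS-protected remote daemon through the factory above.
cli, err := Create(Options{
	Host:      "tcp://10.0.0.5:2376",
	TLS:       true,
	TLSVerify: true,
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(cli.ClientVersion())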