// ContainerInspect returns low-level information about a container. Returns
// an error if the container cannot be found, or if there is an error getting
// the data.
func (c *Container) ContainerInspect(name string, size bool, version version.Version) (interface{}, error) {
	// Ignore version. We're supporting post-1.20 versions.
	defer trace.End(trace.Begin(name))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return nil, NotFoundError(name)
	}
	id := vc.ContainerID
	log.Debugf("Found %q in cache as %q", name, id)

	client := c.containerProxy.Client()

	results, err := client.Containers.GetContainerInfo(containers.NewGetContainerInfoParamsWithContext(ctx).WithID(id))
	if err != nil {
		switch err := err.(type) {
		case *containers.GetContainerInfoNotFound:
			cache.ContainerCache().DeleteContainer(id)
			return nil, NotFoundError(name)
		case *containers.GetContainerInfoInternalServerError:
			return nil, InternalServerError(err.Payload.Message)
		default:
			return nil, InternalServerError(err.Error())
		}
	}

	var started time.Time
	var stopped time.Time
	if results.Payload.ProcessConfig.StartTime != nil && *results.Payload.ProcessConfig.StartTime > 0 {
		started = time.Unix(*results.Payload.ProcessConfig.StartTime, 0)
	}
	if results.Payload.ProcessConfig.StopTime != nil && *results.Payload.ProcessConfig.StopTime > 0 {
		stopped = time.Unix(*results.Payload.ProcessConfig.StopTime, 0)
	}

	// call the dockerStatus function to retrieve the docker-friendly exitCode
	exitCode, status := dockerStatus(
		int(*results.Payload.ProcessConfig.ExitCode),
		*results.Payload.ProcessConfig.Status,
		*results.Payload.ContainerConfig.State,
		started,
		stopped)

	// set the payload values
	exit := int32(exitCode)
	results.Payload.ProcessConfig.ExitCode = &exit
	results.Payload.ProcessConfig.Status = &status

	inspectJSON, err := ContainerInfoToDockerContainerInspect(vc, results.Payload, PortLayerName())
	if err != nil {
		log.Errorf("ContainerInfoToDockerContainerInspect failed with %s", err)
		return nil, err
	}

	log.Debugf("ContainerInspect json config = %+v\n", inspectJSON.Config)
	if inspectJSON.NetworkSettings != nil {
		log.Debugf("Docker inspect - network settings = %#v", inspectJSON.NetworkSettings)
	} else {
		log.Debugf("Docker inspect - network settings = null")
	}

	return inspectJSON, nil
}
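// dockerStatus, called above, translates port layer state into the exit code
// and status string Docker clients expect. The sketch below is illustrative
// only: exampleDockerStatus is a hypothetical stand-in for the real
// dockerStatus in this package, the state strings are assumed, and it relies
// only on fmt and time.
func exampleDockerStatus(exitCode int, status, state string, started, stopped time.Time) (int, string) {
	switch state {
	case "Running":
		// a running container reports uptime rather than an exit code
		return 0, fmt.Sprintf("Up %s", time.Since(started).Round(time.Second))
	case "Stopped", "Exited":
		return exitCode, fmt.Sprintf("Exited (%d) %s ago", exitCode, time.Since(stopped).Round(time.Second))
	default:
		// fall back to the port layer's own status string
		return exitCode, status
	}
}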
// ContainerCreate creates a container.
func (c *Container) ContainerCreate(config types.ContainerCreateConfig) (types.ContainerCreateResponse, error) {
	defer trace.End(trace.Begin(""))

	var err error

	// bail early if the container name is already in use
	if exists := cache.ContainerCache().GetContainer(config.Name); exists != nil {
		err := fmt.Errorf("Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", config.Name, exists.ContainerID)
		log.Errorf("%s", err.Error())
		return types.ContainerCreateResponse{}, derr.NewRequestConflictError(err)
	}

	// get the image from the cache
	image, err := cache.ImageCache().Get(config.Config.Image)
	if err != nil {
		// if no image is found an error is returned and a pull
		// will be initiated by the docker client
		log.Errorf("ContainerCreate: image %s error: %s", config.Config.Image, err.Error())
		return types.ContainerCreateResponse{}, derr.NewRequestNotFoundError(err)
	}

	setCreateConfigOptions(config.Config, image.Config)

	log.Debugf("config.Config = %+v", config.Config)
	if err = validateCreateConfig(&config); err != nil {
		return types.ContainerCreateResponse{}, err
	}

	// Create a container representation in the personality server. This representation
	// will be stored in the cache if create succeeds in the port layer.
	container, err := createInternalVicContainer(image, &config)
	if err != nil {
		return types.ContainerCreateResponse{}, err
	}

	// Create an actualized container in the VIC port layer
	id, err := c.containerCreate(container, config)
	if err != nil {
		return types.ContainerCreateResponse{}, err
	}

	// Container created ok. Save the container id, the config overrides from the
	// API caller, and this container's internal representation in our personality
	// server's cache.
	copyConfigOverrides(container, config)
	container.ContainerID = id
	cache.ContainerCache().AddContainer(container)

	log.Debugf("Container create - name(%s), containerID(%s), config(%#v), host(%#v)",
		container.Name, container.ContainerID, container.Config, container.HostConfig)

	return types.ContainerCreateResponse{ID: id}, nil
}
// ContainerLogs hooks up a container's stdout and stderr streams
// configured with the given struct.
func (c *Container) ContainerLogs(name string, config *backend.ContainerLogsConfig, started chan struct{}) error {
	defer trace.End(trace.Begin(""))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}
	name = vc.ContainerID

	tailLines, since, err := c.validateContainerLogsConfig(vc, config)
	if err != nil {
		return err
	}

	// Outstream modification (from Docker's code) so the stream is streamed with
	// the necessary headers that the CLI expects. This is Docker's scheme.
	wf := ioutils.NewWriteFlusher(config.OutStream)
	defer wf.Close()

	wf.Flush()

	outStream := io.Writer(wf)
	if !vc.Config.Tty {
		outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout)
	}

	// Make a call to our proxy to handle the remoting
	err = c.containerProxy.StreamContainerLogs(name, outStream, started,
		config.Timestamps, config.Follow, since, tailLines)

	return err
}
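// The stdcopy framing used above is worth seeing end to end. This standalone,
// runnable example (separate from the backend code; it imports bytes, fmt, os,
// and github.com/docker/docker/pkg/stdcopy) shows how NewStdWriter prefixes
// each write with an 8-byte header so stdout and stderr can share one stream,
// and how stdcopy.StdCopy demultiplexes that stream on the client side.
func ExampleStdCopy() {
	var muxed bytes.Buffer

	// server side: two framed writers share one underlying stream
	stdout := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
	stderr := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
	fmt.Fprintln(stdout, "normal output")
	fmt.Fprintln(stderr, "error output")

	// client side: split the stream back into distinct stdout and stderr
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &muxed); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}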
func (n *Network) DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error {
	vc := cache.ContainerCache().GetContainer(containerName)
	if vc != nil {
		containerName = vc.ContainerID
	}

	return fmt.Errorf("%s does not implement network.DisconnectContainerFromNetwork", ProductName())
}
// syncContainerCache runs once at startup to populate the container cache
func syncContainerCache() error {
	log.Debugf("Updating container cache")

	backend := NewContainerBackend()
	client := backend.containerProxy.Client()

	reqParams := containers.NewGetContainerListParamsWithContext(ctx).WithAll(swag.Bool(true))
	containme, err := client.Containers.GetContainerList(reqParams)
	if err != nil {
		return errors.Errorf("Failed to retrieve container list from portlayer: %s", err)
	}

	log.Debugf("Found %d containers", len(containme.Payload))
	cc := cache.ContainerCache()

	var errs []string
	for _, info := range containme.Payload {
		container := ContainerInfoToVicContainer(*info)
		cc.AddContainer(container)
		if err = setPortMapping(info, backend, container); err != nil {
			errs = append(errs, err.Error())
		}
	}

	if len(errs) > 0 {
		return errors.Errorf("Failed to set port mapping: %s", strings.Join(errs, "\n"))
	}
	return nil
}
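// cache.ContainerCache(), used throughout this file, is the process-wide
// name/ID index that syncContainerCache rebuilds at startup. A minimal sketch
// of the pattern -- hypothetical type and names, not the actual cache package
// -- is a mutex-guarded map keyed by both container ID and name so callers can
// look up either (requires only sync plus the viccontainer type):
type exampleContainerCache struct {
	m          sync.RWMutex
	containers map[string]*viccontainer.VicContainer
}

func (c *exampleContainerCache) AddContainer(vc *viccontainer.VicContainer) {
	c.m.Lock()
	defer c.m.Unlock()
	// index by both ID and name so GetContainer accepts either form
	c.containers[vc.ContainerID] = vc
	c.containers[vc.Name] = vc
}

func (c *exampleContainerCache) GetContainer(nameOrID string) *viccontainer.VicContainer {
	c.m.RLock()
	defer c.m.RUnlock()
	return c.containers[nameOrID]
}

func (c *exampleContainerCache) DeleteContainer(nameOrID string) {
	c.m.Lock()
	defer c.m.Unlock()
	if vc := c.containers[nameOrID]; vc != nil {
		// remove both index entries for the container
		delete(c.containers, vc.ContainerID)
		delete(c.containers, vc.Name)
	}
}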
func (m *MockContainerProxy) ContainerRunning(vc *viccontainer.VicContainer) (bool, error) {
	// Assume the container is running if it is in the cache. If we need other
	// conditions in the future we can add them, but for now just assume running.
	container := cache.ContainerCache().GetContainer(vc.ContainerID)
	return container != nil, nil
}
// ContainerStop looks for the given container and terminates it,
// waiting the given number of seconds before forcefully killing the
// container. If a negative number of seconds is given, ContainerStop
// will wait for a graceful termination. An error is returned if the
// container is not found, is already stopped, or if there is a
// problem stopping the container.
func (c *Container) ContainerStop(name string, seconds int) error {
	defer trace.End(trace.Begin(name))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}

	return c.containerProxy.Stop(vc, name, seconds, true)
}
func TestPortInformation(t *testing.T) {
	mockContainerInfo := &plmodels.ContainerInfo{}
	mockContainerConfig := &plmodels.ContainerConfig{}
	containerID := "foo"
	mockContainerConfig.ContainerID = &containerID

	mockHostConfig := &container.HostConfig{}

	portMap := nat.PortMap{}
	port, _ := nat.NewPort("tcp", "80")
	portBinding := nat.PortBinding{
		HostIP:   "127.0.0.1",
		HostPort: "8000",
	}
	portBindings := []nat.PortBinding{portBinding}
	portMap[port] = portBindings
	mockHostConfig.PortBindings = portMap

	mockContainerInfo.ContainerConfig = mockContainerConfig

	ip, _ := netlink.ParseAddr("192.168.1.1/24")
	ips := []netlink.Addr{*ip}

	co := viccontainer.NewVicContainer()
	co.HostConfig = mockHostConfig
	co.ContainerID = containerID
	co.Name = "bar"
	cache.ContainerCache().AddContainer(co)

	ports := portInformation(mockContainerInfo, ips)

	assert.NotEmpty(t, ports, "There should be bound IPs")
	assert.Equal(t, len(ports), 1, "Expected 1 port binding, found %d", len(ports))

	port, _ = nat.NewPort("tcp", "80")
	portBinding = nat.PortBinding{
		HostIP:   "127.0.0.1",
		HostPort: "00",
	}
	portMap[port] = portBindings

	ports = portInformation(mockContainerInfo, ips)
	assert.NotEmpty(t, ports, "There should be 1 bound IP")
	assert.Equal(t, len(ports), 1, "Expected 1 port binding, found %d", len(ports))

	port, _ = nat.NewPort("tcp", "800")
	portBinding = nat.PortBinding{
		HostIP:   "127.0.0.1",
		HostPort: "800",
	}
	portMap[port] = portBindings

	ports = portInformation(mockContainerInfo, ips)
	assert.Equal(t, len(ports), 2, "Expected 2 port bindings, found %d", len(ports))
}
// ContainerRm removes the container id from the filesystem. An error
// is returned if the container is not found, or if the remove
// fails. If the remove succeeds, the container name is released, and
// network links are removed.
func (c *Container) ContainerRm(name string, config *types.ContainerRmConfig) error {
	defer trace.End(trace.Begin(name))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}
	id := vc.ContainerID

	// Get the portlayer Client API
	client := c.containerProxy.Client()

	// TODO: Pass this RemoveVolume flag to somewhere
	_ = &config.RemoveVolume

	// Use the force and stop the container first
	if config.ForceRemove {
		c.containerProxy.Stop(vc, name, 0, true)
	}

	// call the remove directly on the name; no need for a handle
	_, err := client.Containers.ContainerRemove(containers.NewContainerRemoveParamsWithContext(ctx).WithID(id))
	if err != nil {
		switch err := err.(type) {
		case *containers.ContainerRemoveNotFound:
			cache.ContainerCache().DeleteContainer(id)
			return NotFoundError(name)
		case *containers.ContainerRemoveDefault:
			return InternalServerError(err.Payload.Message)
		case *containers.ContainerRemoveConflict:
			return derr.NewRequestConflictError(fmt.Errorf("You cannot remove a running container. Stop the container before attempting removal or use -f"))
		default:
			return InternalServerError(err.Error())
		}
	}

	// delete the container from the cache
	cache.ContainerCache().DeleteContainer(id)
	return nil
}
// ContainerKill sends a signal to the container.
// If no signal is given (sig 0), kill with SIGKILL and wait
// for the container to exit.
// If a signal is given, just send it to the container and return.
func (c *Container) ContainerKill(name string, sig uint64) error {
	defer trace.End(trace.Begin(fmt.Sprintf("%s, %d", name, sig)))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}

	return c.containerProxy.Signal(vc, sig)
}
func AddMockContainerToCache() {
	AddMockImageToCache()

	image, err := cache.ImageCache().GetImage("e732471cb81a564575aad46b9510161c5945deaf18e9be3db344333d72f0b4b2")
	if err == nil {
		vc := viccontainer.NewVicContainer()
		vc.ImageID = image.ID
		vc.Config = image.Config // Set defaults. Overrides will get copied below.
		vc.Config.Tty = false
		vc.ContainerID = dummyContainerID
		cache.ContainerCache().AddContainer(vc)

		vc = viccontainer.NewVicContainer()
		vc.ImageID = image.ID
		vc.Config = image.Config
		vc.Config.Tty = true
		vc.ContainerID = dummyContainerID_tty
		cache.ContainerCache().AddContainer(vc)
	}
}
func (c *Container) Handle(id, name string) (string, error) {
	resp, err := c.containerProxy.Client().Containers.Get(containers.NewGetParamsWithContext(ctx).WithID(id))
	if err != nil {
		switch err := err.(type) {
		case *containers.GetNotFound:
			cache.ContainerCache().DeleteContainer(id)
			return "", NotFoundError(name)
		case *containers.GetDefault:
			return "", InternalServerError(err.Payload.Message)
		default:
			return "", InternalServerError(err.Error())
		}
	}
	return resp.Payload, nil
}
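// Handle() is the entry point to the port layer's handle pattern used by
// containerStart, ContainerAttach, and ConnectContainerToNetwork below: a read
// returns an opaque handle, mutations are staged against that handle (each
// call returning a new handle value), and a final Commit applies them; a
// conflict on Commit means another writer changed the container first. A
// hypothetical caller (sketch only, not code from this package) looks like:
func exampleHandleRoundTrip(c *Container, id, name string) error {
	// fetch a handle representing the container's current configuration
	handle, err := c.Handle(id, name)
	if err != nil {
		return err
	}

	// ... stage changes against handle via port layer calls, keeping the
	// newest handle value returned by each call ...

	// commit atomically; a conflict here means another writer won the race
	_, err = c.containerProxy.Client().Containers.Commit(
		containers.NewCommitParamsWithContext(ctx).WithHandle(handle))
	return err
}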
// ContainerResize changes the size of the TTY of the process running
// in the container with the given name to the given height and width.
func (c *Container) ContainerResize(name string, height, width int) error {
	defer trace.End(trace.Begin(name))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}

	// Call the port layer to resize
	plHeight := int32(height)
	plWidth := int32(width)
	return c.containerProxy.Resize(vc, plHeight, plWidth)
}
// portInformation returns port bindings as a slice of Docker Ports for return
// to the client. It returns an empty slice on error.
func portInformation(t *models.ContainerInfo, ips []netlink.Addr) []types.Port {
	// create a port for each IP on the interface (usually only 1, but could be
	// more); works with both IPv4 and IPv6 addresses
	var ports []types.Port

	container := cache.ContainerCache().GetContainer(*t.ContainerConfig.ContainerID)
	if container == nil {
		log.Errorf("Could not find container with ID %s", *t.ContainerConfig.ContainerID)
		return ports
	}

	for _, ip := range ips {
		ports = append(ports, types.Port{IP: ip.IP.String()})
	}

	portBindings := container.HostConfig.PortBindings
	var resultPorts []types.Port
	var err error

	for _, port := range ports {
		for portBindingPrivatePort, hostPortBindings := range portBindings {
			portAndType := strings.SplitN(string(portBindingPrivatePort), "/", 2)
			port.PrivatePort, err = strconv.Atoi(portAndType[0])
			if err != nil {
				log.Infof("Got an error trying to convert private port number to an int")
				continue
			}
			port.Type = portAndType[1]

			for i := 0; i < len(hostPortBindings); i++ {
				newport := port
				newport.PublicPort, err = strconv.Atoi(hostPortBindings[i].HostPort)
				if err != nil {
					log.Infof("Got an error trying to convert public port number to an int")
					continue
				}
				// sanity check -- these sometimes come back as 0 when no binding
				// actually exists; don't report those bindings
				if newport.PublicPort != 0 && newport.PrivatePort != 0 {
					resultPorts = append(resultPorts, newport)
				}
			}
		}
	}
	return resultPorts
}
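// The SplitN above relies on nat.PortMap keys having the "port/proto" shape,
// e.g. "8080/tcp". A standalone sketch of that parsing (illustrative helper,
// not part of the original source; needs only fmt, strconv, and strings). Note
// that nat.Port itself also exposes Proto() and Int() accessors, which avoid
// hand parsing when you hold a nat.Port rather than a raw string.
func exampleParsePortKey(key string) (port int, proto string, err error) {
	parts := strings.SplitN(key, "/", 2)
	port, err = strconv.Atoi(parts[0])
	if err != nil || len(parts) < 2 {
		// no numeric port or no "/proto" suffix, e.g. "80" or "abc/tcp"
		return 0, "", fmt.Errorf("malformed port key %q", key)
	}
	return port, parts[1], nil
}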
// ContainerWait stops processing until the given container is
// stopped. If the container is not found, an error is returned. On a
// successful stop, the exit code of the container is returned. On a
// timeout, an error is returned. If you want to wait forever, supply
// a negative duration for the timeout.
func (c *Container) ContainerWait(name string, timeout time.Duration) (int, error) {
	defer trace.End(trace.Begin(fmt.Sprintf("name(%s):timeout(%s)", name, timeout)))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return -1, NotFoundError(name)
	}

	processExitCode, processStatus, containerState, err := c.containerProxy.Wait(vc, timeout)
	if err != nil {
		return -1, err
	}

	// call the dockerStatus function to retrieve the docker-friendly exitCode
	// TODO: pass the real started / finished times once the port layer provides
	// them; zero-value times are used for now
	exitCode, _ := dockerStatus(int(processExitCode), processStatus, containerState, time.Time{}, time.Time{})

	return exitCode, nil
}
// ContainerRestart stops and starts a container. It attempts to
// gracefully stop the container within the given timeout, forcefully
// stopping it if the timeout is exceeded. If given a negative
// timeout, ContainerRestart will wait forever until a graceful
// stop. Returns an error if the container cannot be found, or if
// there is an underlying error at any stage of the restart.
func (c *Container) ContainerRestart(name string, seconds int) error {
	defer trace.End(trace.Begin(name))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}

	err := c.containerProxy.Stop(vc, name, seconds, false)
	if err != nil {
		return InternalServerError(fmt.Sprintf("Stop failed with: %s", err))
	}

	err = c.containerStart(name, nil, false)
	if err != nil {
		return InternalServerError(fmt.Sprintf("Start failed with: %s", err))
	}

	return nil
}
// cleanupPortBindings gets port bindings for the container and
// unmaps ports if the cVM that previously bound them isn't powered on
func (c *Container) cleanupPortBindings(vc *viccontainer.VicContainer) error {
	for ctrPort, hostPorts := range vc.HostConfig.PortBindings {
		for _, hostPort := range hostPorts {
			hPort := hostPort.HostPort

			cbpLock.Lock()
			mappedCtr, mapped := containerByPort[hPort]
			cbpLock.Unlock()

			if !mapped {
				continue
			}
			log.Debugf("Container %q maps host port %s to container port %s", mappedCtr, hPort, ctrPort)

			// check the power state of the previously bound container with the port layer
			cc := cache.ContainerCache().GetContainer(mappedCtr)
			if cc == nil {
				return fmt.Errorf("Unable to find container %q in the cache, unable to get power state", mappedCtr)
			}
			running, err := c.containerProxy.IsRunning(cc)
			if err != nil {
				return fmt.Errorf("Failed to get container %q power state: %s", mappedCtr, err)
			}
			if running {
				log.Debugf("Running container %q still holds port %s", mappedCtr, hPort)
				continue
			}

			log.Debugf("Unmapping ports for powered off container %q", mappedCtr)
			err = UnmapPorts(cc.HostConfig)
			if err != nil {
				return fmt.Errorf("Failed to unmap host port %s for container %q: %s", hPort, mappedCtr, err)
			}
		}
	}
	return nil
}
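// cleanupPortBindings consults containerByPort/cbpLock, the package-level
// bookkeeping maintained by MapPorts and UnmapPorts. A minimal sketch of that
// pattern -- hypothetical names, not the actual implementation -- is a
// mutex-guarded map from host port to the owning container ID:
var (
	examplePortLock  sync.Mutex
	examplePortOwner = make(map[string]string) // host port -> container ID
)

// exampleClaimPort records that containerID now owns hostPort.
func exampleClaimPort(hostPort, containerID string) {
	examplePortLock.Lock()
	defer examplePortLock.Unlock()
	examplePortOwner[hostPort] = containerID
}

// exampleReleasePort frees hostPort, but only if containerID still owns it,
// so a stale unmap cannot release another container's binding.
func exampleReleasePort(hostPort, containerID string) {
	examplePortLock.Lock()
	defer examplePortLock.Unlock()
	if examplePortOwner[hostPort] == containerID {
		delete(examplePortOwner, hostPort)
	}
}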
func (c *Container) containerStart(name string, hostConfig *containertypes.HostConfig, bind bool) error {
	var err error

	// Get an API client to the portlayer
	client := c.containerProxy.Client()

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}
	id := vc.ContainerID

	// handle legacy hostConfig
	if hostConfig != nil {
		// hostConfig exists for backwards compatibility. TODO: figure out which
		// parameters we need to look at in hostConfig
	} else {
		// vc is guaranteed non-nil by the cache check above
		hostConfig = vc.HostConfig
	}

	if hostConfig.NetworkMode.NetworkName() == "" {
		hostConfig.NetworkMode = vc.HostConfig.NetworkMode
	}

	// get a handle to the container
	handle, err := c.Handle(id, name)
	if err != nil {
		return err
	}

	var endpoints []*models.EndpointConfig
	// bind network
	if bind {
		var bindRes *scopes.BindContainerOK
		bindRes, err = client.Scopes.BindContainer(scopes.NewBindContainerParamsWithContext(ctx).WithHandle(handle))
		if err != nil {
			switch err := err.(type) {
			case *scopes.BindContainerNotFound:
				cache.ContainerCache().DeleteContainer(id)
				return NotFoundError(name)
			case *scopes.BindContainerInternalServerError:
				return InternalServerError(err.Payload.Message)
			default:
				return InternalServerError(err.Error())
			}
		}

		handle = bindRes.Payload.Handle
		endpoints = bindRes.Payload.Endpoints

		// unbind in case we fail later; the deferred closures below read the
		// outer err, so later assignments must not shadow it
		defer func() {
			if err != nil {
				client.Scopes.UnbindContainer(scopes.NewUnbindContainerParamsWithContext(ctx).WithHandle(handle))
			}
		}()

		// unmap ports that vc needs if they're not being used by a previously mapped container
		err = c.cleanupPortBindings(vc)
		if err != nil {
			return err
		}
	}

	// change the state of the container
	// TODO: We need a resolved ID from the name
	var stateChangeRes *containers.StateChangeOK
	stateChangeRes, err = client.Containers.StateChange(containers.NewStateChangeParamsWithContext(ctx).WithHandle(handle).WithState("RUNNING"))
	if err != nil {
		switch err := err.(type) {
		case *containers.StateChangeNotFound:
			cache.ContainerCache().DeleteContainer(id)
			return NotFoundError(name)
		case *containers.StateChangeDefault:
			return InternalServerError(err.Payload.Message)
		default:
			return InternalServerError(err.Error())
		}
	}

	handle = stateChangeRes.Payload

	// map ports
	if bind {
		e := c.findPortBoundNetworkEndpoint(hostConfig, endpoints)
		if err = MapPorts(hostConfig, e, id); err != nil {
			return InternalServerError(fmt.Sprintf("error mapping ports: %s", err))
		}

		defer func() {
			if err != nil {
				UnmapPorts(hostConfig)
			}
		}()
	}

	// commit the handle; this will reconfigure and start the vm
	_, err = client.Containers.Commit(containers.NewCommitParamsWithContext(ctx).WithHandle(handle))
	if err != nil {
		switch err := err.(type) {
		case *containers.CommitNotFound:
			cache.ContainerCache().DeleteContainer(id)
			return NotFoundError(name)
		case *containers.CommitConflict:
			return ConflictError(err.Error())
		case *containers.CommitDefault:
			return InternalServerError(err.Payload.Message)
		default:
			return InternalServerError(err.Error())
		}
	}

	return nil
}
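// containerStart and ConnectContainerToNetwork below share the same rollback
// idiom: deferred closures read the function's err variable and undo earlier
// steps only when a later step failed. A distilled, runnable sketch of the
// idiom (the step names are hypothetical; it uses only fmt and log):
func exampleStartWithRollback() (err error) {
	// step 1: bind
	defer func() {
		if err != nil {
			log.Warnf("rollback: unbind") // runs only if a later step failed
		}
	}()

	// step 2: map ports
	defer func() {
		if err != nil {
			log.Warnf("rollback: unmap ports")
		}
	}()

	// step 3: commit; assigning to err (rather than shadowing it with :=) is
	// what lets the deferred closures observe the failure
	err = fmt.Errorf("commit failed")
	return err
}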
func (n *Network) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *apinet.EndpointSettings) error {
	vc := cache.ContainerCache().GetContainer(containerName)
	if vc != nil {
		containerName = vc.ContainerID
	}

	client := PortLayerClient()
	getRes, err := client.Containers.Get(containers.NewGetParamsWithContext(ctx).WithID(containerName))
	if err != nil {
		switch err := err.(type) {
		case *containers.GetNotFound:
			return derr.NewRequestNotFoundError(fmt.Errorf(err.Payload.Message))
		case *containers.GetDefault:
			return derr.NewErrorWithStatusCode(fmt.Errorf(err.Payload.Message), http.StatusInternalServerError)
		default:
			return derr.NewErrorWithStatusCode(err, http.StatusInternalServerError)
		}
	}

	h := getRes.Payload
	nc := &models.NetworkConfig{NetworkName: networkName}
	if endpointConfig != nil {
		if endpointConfig.IPAMConfig != nil && endpointConfig.IPAMConfig.IPv4Address != "" {
			nc.Address = &endpointConfig.IPAMConfig.IPv4Address
		}

		// pass Links and Aliases to the port layer
		nc.Aliases = vicendpoint.Alias(endpointConfig)
	}

	addConRes, err := client.Scopes.AddContainer(scopes.NewAddContainerParamsWithContext(ctx).
		WithScope(nc.NetworkName).
		WithConfig(&models.ScopesAddContainerConfig{
			Handle:        h,
			NetworkConfig: nc,
		}))
	if err != nil {
		switch err := err.(type) {
		case *scopes.AddContainerNotFound:
			return derr.NewRequestNotFoundError(fmt.Errorf(err.Payload.Message))
		case *scopes.AddContainerInternalServerError:
			return derr.NewErrorWithStatusCode(fmt.Errorf(err.Payload.Message), http.StatusInternalServerError)
		default:
			return derr.NewErrorWithStatusCode(err, http.StatusInternalServerError)
		}
	}

	h = addConRes.Payload

	// only bind if the container is running
	// get the state of the container
	getStateRes, err := client.Containers.GetState(containers.NewGetStateParamsWithContext(ctx).WithHandle(h))
	if err != nil {
		switch err := err.(type) {
		case *containers.GetStateNotFound:
			return derr.NewRequestNotFoundError(fmt.Errorf(err.Payload.Message))
		case *containers.GetStateDefault:
			return derr.NewErrorWithStatusCode(fmt.Errorf(err.Payload.Message), http.StatusInternalServerError)
		default:
			return derr.NewErrorWithStatusCode(err, http.StatusInternalServerError)
		}
	}

	h = getStateRes.Payload.Handle
	if getStateRes.Payload.State == "RUNNING" {
		// assign to the outer err rather than shadowing it with := so the
		// deferred rollback below also fires if the final commit fails
		var bindRes *scopes.BindContainerOK
		bindRes, err = client.Scopes.BindContainer(scopes.NewBindContainerParamsWithContext(ctx).WithHandle(h))
		if err != nil {
			switch err := err.(type) {
			case *scopes.BindContainerNotFound:
				return derr.NewRequestNotFoundError(fmt.Errorf(err.Payload.Message))
			case *scopes.BindContainerInternalServerError:
				return derr.NewErrorWithStatusCode(fmt.Errorf(err.Payload.Message), http.StatusInternalServerError)
			default:
				return derr.NewErrorWithStatusCode(err, http.StatusInternalServerError)
			}
		}

		defer func() {
			if err == nil {
				return
			}
			if _, err2 := client.Scopes.UnbindContainer(scopes.NewUnbindContainerParamsWithContext(ctx).WithHandle(h)); err2 != nil {
				log.Warnf("failed bind container rollback: %s", err2)
			}
		}()

		h = bindRes.Payload.Handle
	}

	// commit the handle
	_, err = client.Containers.Commit(containers.NewCommitParamsWithContext(ctx).WithHandle(h))
	if err != nil {
		switch err := err.(type) {
		case *containers.CommitNotFound:
			return derr.NewRequestNotFoundError(fmt.Errorf(err.Payload.Message))
		case *containers.CommitDefault:
			return derr.NewErrorWithStatusCode(fmt.Errorf(err.Payload.Message), http.StatusInternalServerError)
		default:
			return derr.NewErrorWithStatusCode(err, http.StatusInternalServerError)
		}
	}

	return nil
}
// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig.
func (c *Container) ContainerAttach(name string, ca *backend.ContainerAttachConfig) error {
	defer trace.End(trace.Begin(name))

	// Look up the container name in the metadata cache to get long ID
	vc := cache.ContainerCache().GetContainer(name)
	if vc == nil {
		return NotFoundError(name)
	}
	id := vc.ContainerID

	client := c.containerProxy.Client()
	handle, err := c.Handle(id, name)
	if err != nil {
		return err
	}

	bind, err := client.Interaction.InteractionBind(interaction.NewInteractionBindParamsWithContext(ctx).
		WithConfig(&models.InteractionBindConfig{
			Handle: handle,
		}))
	if err != nil {
		return InternalServerError(err.Error())
	}
	handle, ok := bind.Payload.Handle.(string)
	if !ok {
		return InternalServerError(fmt.Sprintf("Type assertion failed for %#+v", bind.Payload.Handle))
	}

	// commit the handle; this will reconfigure the vm
	_, err = client.Containers.Commit(containers.NewCommitParamsWithContext(ctx).WithHandle(handle))
	if err != nil {
		switch err := err.(type) {
		case *containers.CommitNotFound:
			return NotFoundError(name)
		case *containers.CommitConflict:
			return ConflictError(err.Error())
		case *containers.CommitDefault:
			return InternalServerError(err.Payload.Message)
		default:
			return InternalServerError(err.Error())
		}
	}

	clStdin, clStdout, clStderr, err := ca.GetStreams()
	if err != nil {
		return InternalServerError("Unable to get stdio streams for calling client")
	}
	defer clStdin.Close()

	if !vc.Config.Tty && ca.MuxStreams {
		// replace stdout/stderr with Docker's multiplex stream; wrap each
		// stream with its own frame type
		if ca.UseStdout {
			clStdout = stdcopy.NewStdWriter(clStdout, stdcopy.Stdout)
		}
		if ca.UseStderr {
			clStderr = stdcopy.NewStdWriter(clStderr, stdcopy.Stderr)
		}
	}

	err = c.containerProxy.AttachStreams(context.Background(), vc, clStdin, clStdout, clStderr, ca)
	if err != nil {
		if _, ok := err.(DetachError); ok {
			log.Infof("Detach detected, tearing down connection")
			client = c.containerProxy.Client()
			handle, err = c.Handle(id, name)
			if err != nil {
				return err
			}

			unbind, err := client.Interaction.InteractionUnbind(interaction.NewInteractionUnbindParamsWithContext(ctx).
				WithConfig(&models.InteractionUnbindConfig{
					Handle: handle,
				}))
			if err != nil {
				return InternalServerError(err.Error())
			}

			handle, ok = unbind.Payload.Handle.(string)
			if !ok {
				return InternalServerError("type assertion failed")
			}

			// commit the handle; this will reconfigure the vm
			_, err = client.Containers.Commit(containers.NewCommitParamsWithContext(ctx).WithHandle(handle))
			if err != nil {
				switch err := err.(type) {
				case *containers.CommitNotFound:
					return NotFoundError(name)
				case *containers.CommitDefault:
					return InternalServerError(err.Payload.Message)
				default:
					return InternalServerError(err.Error())
				}
			}
		}
		return err
	}

	return nil
}