func (r *Reporter) execHost(req xfer.Request) xfer.Response { cmd := exec.Command(r.hostShellCmd[0], r.hostShellCmd[1:]...) cmd.Env = []string{"TERM=xterm"} ptyPipe, err := pty.Start(cmd) if err != nil { return xfer.ResponseError(err) } id, pipe, err := controls.NewPipeFromEnds(nil, ptyPipe, r.pipes, req.AppID) if err != nil { return xfer.ResponseError(err) } pipe.OnClose(func() { if err := cmd.Process.Kill(); err != nil { log.Errorf("Error stopping host shell: %v", err) } if err := ptyPipe.Close(); err != nil { log.Errorf("Error closing host shell's pty: %v", err) } log.Info("Host shell closed.") }) go func() { if err := cmd.Wait(); err != nil { log.Errorf("Error waiting on host shell: %v", err) } pipe.Close() }() return xfer.Response{ Pipe: id, RawTTY: true, } }
func (r *registry) execContainer(containerID string, req xfer.Request) xfer.Response { exec, err := r.client.CreateExec(docker_client.CreateExecOptions{ AttachStdin: true, AttachStdout: true, AttachStderr: true, Tty: true, Cmd: []string{"/bin/sh", "-c", "TERM=xterm exec $( (type getent > /dev/null 2>&1 && getent passwd root | cut -d: -f7 2>/dev/null) || echo /bin/sh)"}, Container: containerID, }) if err != nil { return xfer.ResponseError(err) } id, pipe, err := controls.NewPipe(r.pipes, req.AppID) if err != nil { return xfer.ResponseError(err) } local, _ := pipe.Ends() cw, err := r.client.StartExecNonBlocking(exec.ID, docker_client.StartExecOptions{ Tty: true, RawTerminal: true, InputStream: local, OutputStream: local, ErrorStream: local, }) if err != nil { return xfer.ResponseError(err) } pipe.OnClose(func() { if err := cw.Close(); err != nil { log.Errorf("Error closing exec: %v", err) return } log.Infof("Exec on container %s closed.", containerID) }) go func() { if err := cw.Wait(); err != nil { log.Errorf("Error waiting on exec: %v", err) } pipe.Close() }() return xfer.Response{ Pipe: id, RawTTY: true, } }
func (r *Reporter) deletePod(req xfer.Request, namespaceID, podID string) xfer.Response { if err := r.client.DeletePod(namespaceID, podID); err != nil { return xfer.ResponseError(err) } return xfer.Response{ RemovedNode: req.NodeID, } }
func (r *registry) attachContainer(containerID string, req xfer.Request) xfer.Response { c, ok := r.GetContainer(containerID) if !ok { return xfer.ResponseErrorf("Not found: %s", containerID) } hasTTY := c.HasTTY() id, pipe, err := controls.NewPipe(r.pipes, req.AppID) if err != nil { return xfer.ResponseError(err) } local, _ := pipe.Ends() cw, err := r.client.AttachToContainerNonBlocking(docker_client.AttachToContainerOptions{ Container: containerID, RawTerminal: hasTTY, Stream: true, Stdin: true, Stdout: true, Stderr: true, InputStream: local, OutputStream: local, ErrorStream: local, }) if err != nil { return xfer.ResponseError(err) } pipe.OnClose(func() { if err := cw.Close(); err != nil { log.Errorf("Error closing attachment: %v", err) return } log.Infof("Attachment to container %s closed.", containerID) }) go func() { if err := cw.Wait(); err != nil { log.Errorf("Error waiting on exec: %v", err) } pipe.Close() }() return xfer.Response{ Pipe: id, RawTTY: hasTTY, } }
func (r *registry) removeContainer(containerID string, req xfer.Request) xfer.Response { log.Infof("Removing container %s", containerID) if err := r.client.RemoveContainer(docker_client.RemoveContainerOptions{ ID: containerID, }); err != nil { return xfer.ResponseError(err) } return xfer.Response{ RemovedNode: req.NodeID, } }
// GetLogs is the control to get the logs for a kubernetes pod func (r *Reporter) GetLogs(req xfer.Request, namespaceID, podID string) xfer.Response { readCloser, err := r.client.GetLogs(namespaceID, podID) if err != nil { return xfer.ResponseError(err) } readWriter := struct { io.Reader io.Writer }{ readCloser, ioutil.Discard, } id, pipe, err := controls.NewPipeFromEnds(nil, readWriter, r.pipes, req.AppID) if err != nil { return xfer.ResponseError(err) } pipe.OnClose(func() { readCloser.Close() }) return xfer.Response{ Pipe: id, } }
// handleProbeWS accepts websocket connections from the probe and registers // them in the control router, such that HandleControl calls can find them. func handleProbeWS(cr ControlRouter) CtxHandlerFunc { return func(ctx context.Context, w http.ResponseWriter, r *http.Request) { probeID := r.Header.Get(xfer.ScopeProbeIDHeader) if probeID == "" { respondWith(w, http.StatusBadRequest, xfer.ScopeProbeIDHeader) return } conn, err := xfer.Upgrade(w, r, nil) if err != nil { log.Printf("Error upgrading control websocket: %v", err) return } defer conn.Close() codec := xfer.NewJSONWebsocketCodec(conn) client := rpc.NewClientWithCodec(codec) defer client.Close() id, err := cr.Register(ctx, probeID, func(req xfer.Request) xfer.Response { var res xfer.Response if err := client.Call("control.Handle", req, &res); err != nil { return xfer.ResponseError(err) } return res }) if err != nil { respondWith(w, http.StatusBadRequest, err.Error()) return } defer cr.Deregister(ctx, probeID, id) if err := codec.WaitForReadError(); err != nil && err != io.EOF && !xfer.IsExpectedWSCloseError(err) { log.Errorf("Error on websocket: %v", err) } } }
func (r *registry) unpauseContainer(containerID string, _ xfer.Request) xfer.Response { log.Infof("Unpausing container %s", containerID) return xfer.ResponseError(r.client.UnpauseContainer(containerID)) }
func (r *registry) restartContainer(containerID string, _ xfer.Request) xfer.Response { log.Infof("Restarting container %s", containerID) return xfer.ResponseError(r.client.RestartContainer(containerID, waitTime)) }
// ScaleDown is the control to scale down a deployment
func (r *Reporter) ScaleDown(req xfer.Request, resource, namespace, id string) xfer.Response { return xfer.ResponseError(r.client.ScaleDown(resource, namespace, id)) }