// TODO: should take a writer, not []byte
func (writer *NsenterWriter) WriteFile(filename string, data []byte, perm os.FileMode) error {
	cmd := "nsenter"
	base_args := []string{
		"--mount=/rootfs/proc/1/ns/mnt",
		"--",
	}

	echo_args := append(base_args, "sh", "-c", fmt.Sprintf("cat > %s", filename))
	glog.V(5).Infof("Command to write data to file: %v %v", cmd, echo_args)
	command := exec.Command(cmd, echo_args...)
	command.Stdin = bytes.NewBuffer(data)
	outputBytes, err := command.CombinedOutput()
	if err != nil {
		glog.Errorf("Output from writing to %q: %v", filename, string(outputBytes))
		return err
	}

	chmod_args := append(base_args, "chmod", fmt.Sprintf("%o", perm), filename)
	glog.V(5).Infof("Command to change permissions to file: %v %v", cmd, chmod_args)
	outputBytes, err = exec.Command(cmd, chmod_args...).CombinedOutput()
	if err != nil {
		glog.Errorf("Output from chmod command: %v", string(outputBytes))
		return err
	}
	return nil
}
// Checks if iptables version has a "wait" flag
func getIptablesWaitFlag(vstring string) []string {
	version, err := semver.NewVersion(vstring)
	if err != nil {
		glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
		return nil
	}

	minVersion, err := semver.NewVersion(MinWaitVersion)
	if err != nil {
		glog.Errorf("MinWaitVersion (%s) is not a valid version string: %v", MinWaitVersion, err)
		return nil
	}
	if version.LessThan(*minVersion) {
		return nil
	}

	minVersion, err = semver.NewVersion(MinWait2Version)
	if err != nil {
		glog.Errorf("MinWait2Version (%s) is not a valid version string: %v", MinWait2Version, err)
		return nil
	}
	if version.LessThan(*minVersion) {
		return []string{"-w"}
	} else {
		return []string{"-w2"}
	}
}
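// Hypothetical usage sketch (not part of the original source): a caller could
// prepend the result of getIptablesWaitFlag to an iptables invocation. The
// helper name runIptablesWithWait and the use of the standard os/exec package
// are assumptions for illustration only.
func runIptablesWithWait(iptablesVersionString string, ruleArgs ...string) ([]byte, error) {
	// getIptablesWaitFlag returns nil when the version has no wait support,
	// so appending to its result is safe either way.
	args := append(getIptablesWaitFlag(iptablesVersionString), ruleArgs...)
	return exec.Command("iptables", args...).CombinedOutput()
}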
// handle implements a websocket handler.
func (conn *Conn) handle(ws *websocket.Conn) {
	defer conn.Close()
	conn.initialize(ws)

	for {
		conn.resetTimeout()
		var data []byte
		if err := websocket.Message.Receive(ws, &data); err != nil {
			if err != io.EOF {
				glog.Errorf("Error on socket receive: %v", err)
			}
			break
		}
		if len(data) == 0 {
			continue
		}
		channel := data[0]
		if conn.codec == base64Codec {
			channel = channel - '0'
		}
		data = data[1:]
		if int(channel) >= len(conn.channels) {
			glog.V(6).Infof("Frame is targeted for a reader %d that is not valid, possible protocol error", channel)
			continue
		}
		if _, err := conn.channels[channel].DataFromSocket(data); err != nil {
			glog.Errorf("Unable to write frame to %d: %v\n%s", channel, err, string(data))
			continue
		}
	}
}
// IsLikelyNotMountPoint determines whether a path is a mountpoint by calling findmnt
// in the host's root mount namespace.
func (n *NsenterMounter) IsLikelyNotMountPoint(file string) (bool, error) {
	file, err := filepath.Abs(file)
	if err != nil {
		return true, err
	}

	args := []string{"--mount=/rootfs/proc/1/ns/mnt", "--", n.absHostPath("findmnt"), "-o", "target", "--noheadings", "--target", file}
	glog.V(5).Infof("findmnt command: %v %v", nsenterPath, args)

	exec := exec.New()
	out, err := exec.Command(nsenterPath, args...).CombinedOutput()
	if err != nil {
		glog.Errorf("Failed to nsenter mount, return file doesn't exist: %v", err)
		// Assuming the command itself is correct, an error here most likely
		// means that the directory does not exist.
		return true, os.ErrNotExist
	}
	strOut := strings.TrimSuffix(string(out), "\n")
	glog.V(5).Infof("IsLikelyNotMountPoint findmnt output: %v", strOut)

	if strOut == file {
		return false, nil
	}

	return true, nil
}
// receive reads result from the decoder in a loop and sends down the result channel.
func (sw *StreamWatcher) receive() {
	defer close(sw.result)
	defer sw.Stop()
	defer util.HandleCrash()
	for {
		action, obj, err := sw.source.Decode()
		if err != nil {
			// Ignore expected error.
			if sw.stopping() {
				return
			}
			switch err {
			case io.EOF:
				// watch closed normally
			case io.ErrUnexpectedEOF:
				glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
			default:
				msg := "Unable to decode an event from the watch stream: %v"
				if util.IsProbableEOF(err) {
					glog.V(5).Infof(msg, err)
				} else {
					glog.Errorf(msg, err)
				}
			}
			return
		}
		sw.result <- Event{
			Type:   action,
			Object: obj,
		}
	}
}
// Writes 'oomScoreAdj' to /proc/<pid>/oom_score_adj for all processes in cgroup cgroupName.
// Keeps trying to write until the process list of the cgroup stabilizes, or until maxTries tries.
func (oomAdjuster *OOMAdjuster) applyOOMScoreAdjContainer(cgroupName string, oomScoreAdj, maxTries int) error {
	adjustedProcessSet := make(map[int]bool)
	for i := 0; i < maxTries; i++ {
		continueAdjusting := false
		pidList, err := oomAdjuster.pidLister(cgroupName)
		if err != nil {
			continueAdjusting = true
			glog.Errorf("Error getting process list for cgroup %s: %+v", cgroupName, err)
		} else if len(pidList) == 0 {
			continueAdjusting = true
		} else {
			for _, pid := range pidList {
				if !adjustedProcessSet[pid] {
					continueAdjusting = true
					if err = oomAdjuster.ApplyOOMScoreAdj(pid, oomScoreAdj); err == nil {
						adjustedProcessSet[pid] = true
					}
				}
			}
		}
		if !continueAdjusting {
			return nil
		}
		// There's a slight race. A process might have forked just before we write its OOM score adjust.
		// The fork might copy the parent process's old OOM score, then this function might execute and
		// update the parent's OOM score, but the forked process id might not be reflected in cgroup.procs
		// for a short amount of time. So this function might return without changing the forked process's
		// OOM score. Very unlikely race, so ignoring this for now.
	}
	return fmt.Errorf("exceeded maxTries, some processes might not have desired OOM score")
}
// InClusterConfig returns a config object which uses the service account
// kubernetes gives to pods. It's intended for clients that expect to be
// running inside a pod running on kubernetes. It will return an error if
// called from a process not running in a kubernetes environment.
func InClusterConfig() (*Config, error) {
	host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
	if len(host) == 0 || len(port) == 0 {
		return nil, fmt.Errorf("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined")
	}

	token, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountTokenKey)
	if err != nil {
		return nil, err
	}
	tlsClientConfig := TLSClientConfig{}
	rootCAFile := "/var/run/secrets/kubernetes.io/serviceaccount/" + api.ServiceAccountRootCAKey
	if _, err := util.CertPoolFromFile(rootCAFile); err != nil {
		glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
	} else {
		tlsClientConfig.CAFile = rootCAFile
	}

	return &Config{
		// TODO: switch to using cluster DNS.
		Host:            "https://" + net.JoinHostPort(host, port),
		BearerToken:     string(token),
		TLSClientConfig: tlsClientConfig,
	}, nil
}
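// Hypothetical usage sketch (an assumption, not from the original source): a
// binary running in a pod could load the in-cluster config at startup and
// abort if it is unavailable. The helper name mustInClusterConfig is invented
// for illustration.
func mustInClusterConfig() *Config {
	config, err := InClusterConfig()
	if err != nil {
		glog.Fatalf("Unable to load in-cluster configuration: %v", err)
	}
	return config
}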
// Checks if iptables has the "-C" flag
func getIptablesHasCheckCommand(vstring string) bool {
	minVersion, err := semver.NewVersion(MinCheckVersion)
	if err != nil {
		glog.Errorf("MinCheckVersion (%s) is not a valid version string: %v", MinCheckVersion, err)
		return true
	}
	version, err := semver.NewVersion(vstring)
	if err != nil {
		glog.Errorf("vstring (%s) is not a valid version string: %v", vstring, err)
		return true
	}
	if version.LessThan(*minVersion) {
		return false
	}
	return true
}
// rewriteResponse modifies an HTML response by updating absolute links referring
// to the original host to instead refer to the proxy transport.
func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*http.Response, error) {
	origBody := resp.Body
	defer origBody.Close()

	newContent := &bytes.Buffer{}
	var reader io.Reader = origBody
	var writer io.Writer = newContent
	encoding := resp.Header.Get("Content-Encoding")
	switch encoding {
	case "gzip":
		var err error
		reader, err = gzip.NewReader(reader)
		if err != nil {
			return nil, fmt.Errorf("error making gzip reader: %v", err)
		}
		gzw := gzip.NewWriter(writer)
		defer gzw.Close()
		writer = gzw
		// TODO: support flate, other encodings.
	case "":
		// This is fine
	default:
		// Some encoding we don't understand; don't try to parse this
		glog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding)
		return resp, nil
	}

	urlRewriter := func(targetUrl string) string {
		return t.rewriteURL(targetUrl, req.URL)
	}
	err := rewriteHTML(reader, writer, urlRewriter)
	if err != nil {
		glog.Errorf("Failed to rewrite URLs: %v", err)
		return resp, err
	}

	resp.Body = ioutil.NopCloser(newContent)
	// Update header node with new content-length
	// TODO: Remove any hash/signature headers here?
	resp.Header.Del("Content-Length")
	resp.ContentLength = int64(newContent.Len())

	return resp, err
}
// logPanic logs the caller tree when a panic occurs.
func logPanic(r interface{}) {
	callers := ""
	for i := 0; true; i++ {
		_, file, line, ok := runtime.Caller(i)
		if !ok {
			break
		}
		callers = callers + fmt.Sprintf("%v:%v\n", file, line)
	}
	glog.Errorf("Recovered from panic: %#v (%v)\n%v", r, r, callers)
}
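// Hypothetical usage sketch (not from the original source): logPanic is only
// useful from a deferred recover, for example wrapping an arbitrary callback.
// The wrapper name runSafely is invented for illustration.
func runSafely(fn func()) {
	defer func() {
		if r := recover(); r != nil {
			// Log the panic value and caller tree, then continue instead of crashing.
			logPanic(r)
		}
	}()
	fn()
}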
// Close asynchronously closes all tunnels in the list after waiting for 1
// minute. Tunnels will still be open upon this function's return, but should
// no longer be used.
func (l *SSHTunnelList) Close() {
	for ix := range l.entries {
		entry := l.entries[ix]
		go func() {
			defer HandleCrash()
			time.Sleep(1 * time.Minute)
			if err := entry.Tunnel.Close(); err != nil {
				glog.Errorf("Failed to close tunnel %v: %v", entry, err)
			}
		}()
	}
}
func (realExiter) Exitf(format string, args ...interface{}) {
	func() {
		defer func() {
			// Let's just be extra sure we die, even if Exitf panics
			if r := recover(); r != nil {
				glog.Errorf(format, args...)
				os.Exit(2)
			}
		}()
		glog.Exitf(format, args...)
	}()
}
// MakeSSHTunnels creates a tunnel entry for each address; addresses that fail
// to produce a tunnel are logged and skipped.
func MakeSSHTunnels(user, keyfile string, addresses []string) *SSHTunnelList {
	tunnels := []SSHTunnelEntry{}
	for ix := range addresses {
		addr := addresses[ix]
		tunnel, err := NewSSHTunnel(user, keyfile, addr)
		if err != nil {
			glog.Errorf("Failed to create tunnel for %q: %v", addr, err)
			continue
		}
		tunnels = append(tunnels, SSHTunnelEntry{addr, tunnel})
	}
	return &SSHTunnelList{tunnels}
}
// Open attempts to open all tunnels in the list, and removes any tunnels that
// failed to open.
func (l *SSHTunnelList) Open() error {
	var openTunnels []SSHTunnelEntry
	for ix := range l.entries {
		if err := l.entries[ix].Tunnel.Open(); err != nil {
			glog.Errorf("Failed to open tunnel %v: %v", l.entries[ix], err)
		} else {
			openTunnels = append(openTunnels, l.entries[ix])
		}
	}
	l.entries = openTunnels
	if len(l.entries) == 0 {
		return errors.New("Failed to open any tunnels.")
	}
	return nil
}
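// Hypothetical usage sketch (an assumption, not from the original source):
// build a tunnel list and open it, returning an error if no tunnel could be
// established. The helper name openSSHTunnels is invented for illustration.
func openSSHTunnels(user, keyfile string, addresses []string) (*SSHTunnelList, error) {
	tunnels := MakeSSHTunnels(user, keyfile, addresses)
	// Open drops entries that fail and errors only if nothing opened.
	if err := tunnels.Open(); err != nil {
		return nil, err
	}
	return tunnels, nil
}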
func (e *streamProtocolV1) stream(conn httpstream.Connection) error {
	doneChan := make(chan struct{}, 2)
	errorChan := make(chan error)

	cp := func(s string, dst io.Writer, src io.Reader) {
		glog.V(6).Infof("Copying %s", s)
		defer glog.V(6).Infof("Done copying %s", s)
		if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
			glog.Errorf("Error copying %s: %v", s, err)
		}
		if s == api.StreamTypeStdout || s == api.StreamTypeStderr {
			doneChan <- struct{}{}
		}
	}

	var (
		err                                                  error
		errorStream, remoteStdin, remoteStdout, remoteStderr httpstream.Stream
	)

	// set up all the streams first
	headers := http.Header{}
	headers.Set(api.StreamType, api.StreamTypeError)
	errorStream, err = conn.CreateStream(headers)
	if err != nil {
		return err
	}
	defer errorStream.Reset()

	// Create all the streams first, then start the copy goroutines. The server doesn't start its copy
	// goroutines until it's received all of the streams. If the client creates the stdin stream and
	// immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the
	// spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't
	// getting processed because the server hasn't started its copying, and it won't do that until it
	// gets all the streams. By creating all the streams first, we ensure that the server is ready to
	// process data before the client starts sending any. See https://issues.k8s.io/16373 for more info.
	if e.stdin != nil {
		headers.Set(api.StreamType, api.StreamTypeStdin)
		remoteStdin, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
		defer remoteStdin.Reset()
	}

	if e.stdout != nil {
		headers.Set(api.StreamType, api.StreamTypeStdout)
		remoteStdout, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
		defer remoteStdout.Reset()
	}

	if e.stderr != nil && !e.tty {
		headers.Set(api.StreamType, api.StreamTypeStderr)
		remoteStderr, err = conn.CreateStream(headers)
		if err != nil {
			return err
		}
		defer remoteStderr.Reset()
	}

	// now that all the streams have been created, proceed with reading & copying

	// always read from errorStream
	go func() {
		message, err := ioutil.ReadAll(errorStream)
		if err != nil && err != io.EOF {
			errorChan <- fmt.Errorf("Error reading from error stream: %s", err)
			return
		}
		if len(message) > 0 {
			errorChan <- fmt.Errorf("Error executing remote command: %s", message)
			return
		}
	}()

	if e.stdin != nil {
		// TODO this goroutine will never exit cleanly (the io.Copy never unblocks)
		// because stdin is not closed until the process exits. If we try to call
		// stdin.Close(), it returns no error but doesn't unblock the copy. It will
		// exit when the process exits, instead.
		go cp(api.StreamTypeStdin, remoteStdin, e.stdin)
	}

	waitCount := 0
	completedStreams := 0

	if e.stdout != nil {
		waitCount++
		go cp(api.StreamTypeStdout, e.stdout, remoteStdout)
	}

	if e.stderr != nil && !e.tty {
		waitCount++
		go cp(api.StreamTypeStderr, e.stderr, remoteStderr)
	}

Loop:
	for {
		select {
		case <-doneChan:
			completedStreams++
			if completedStreams == waitCount {
				break Loop
			}
		case err := <-errorChan:
			return err
		}
	}

	return nil
}
// copyBytes copies data from in to out and logs any error returned by the copy.
func (s *SSHTunnel) copyBytes(out io.Writer, in io.Reader) {
	if _, err := io.Copy(out, in); err != nil {
		glog.Errorf("Error in SSH tunnel: %v", err)
	}
}