func (e *Exporter) exportImage(name string, artifact *ct.Artifact) error {
	log := e.log.New("name", name)
	for _, rootfs := range artifact.Manifest().Rootfs {
		for _, layer := range rootfs.Layers {
			log.Info("exporting layer", "id", layer.ID)
			if err := e.exportLayer(layer); err != nil {
				log.Error("error exporting layer", "id", layer.ID, "err", err)
				return err
			}
		}
	}
	path := e.imagePath(artifact.Manifest().ID())
	if _, err := os.Stat(path); err == nil {
		log.Info("manifest already exists")
		return nil
	}
	log.Info("writing image manifest", "path", path)
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		log.Error("error writing image manifest", "path", path, "err", err)
		return err
	}
	if err := ioutil.WriteFile(path, artifact.RawManifest, 0644); err != nil {
		log.Error("error writing image manifest", "path", path, "err", err)
		return err
	}
	return nil
}
func ServeRequest(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		http.Error(w, "Method not allowed", 405)
		return
	}
	if r.Header.Get("Origin") != "http://"+r.Host {
		http.Error(w, "Origin not allowed", 403)
		return
	}
	ws, err := websocket.Upgrade(w, r.Header, nil, 1024, 1024)
	if _, ok := err.(websocket.HandshakeError); ok {
		http.Error(w, "Not a websocket handshake", 400)
		return
	} else if err != nil {
		log.Println(err)
		return
	}
	tid, err := uuid.NewV4()
	if err != nil {
		log.Error("could not create sessionid")
		return
	}
	id := tid.String()
	c := &connection{SessionId: id, send: make(chan []byte, 256), ws: ws}
	h.register <- c
	go c.writePump()
	c.readPump()
}
func main() {
	log := upto.New("")
	log.Error("123")
	test()
}
func testStackFrames(t *testing.T, skip int, mustHave, mustNotHave []string) {
	buf := new(bytes.Buffer)
	w := Redirect(buf)
	defer Redirect(w)

	log := NewLogger(SkipStackFrames(skip))
	log.Error("foo")
	log.Fatal("bar")

	mustNotHave = append(mustNotHave, []string{
		"goroutine",
		"/say.go:",
	}...)

	got := buf.String()
	for _, s := range mustHave {
		if !strings.Contains(got, s) {
			t.Errorf("%q does not appear in the stack frames (skip=%d):\n%s", s, skip, got)
		}
	}
	for _, s := range mustNotHave {
		if strings.Contains(got, s) {
			t.Errorf("%q should not appear in the stack frames (skip=%d):\n%s", s, skip, got)
		}
	}
}
func TestNewLogger(t *testing.T) {
	expect(t, func() {
		SetData("foo", "bar")
		log := NewLogger()
		log.Error("oops")
	}, []string{
		`ERROR oops | foo="bar"`,
	})
}
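// The expect helper used above is not shown; a minimal sketch of what it
// could look like, reusing the Redirect pattern from testStackFrames above.
// The real helper in this package may differ (it might compare exact lines or
// strip timestamps). Assumes only the standard bytes, strings and testing
// packages.
func expect(t *testing.T, fn func(), want []string) {
	buf := new(bytes.Buffer)
	old := Redirect(buf) // Redirect is assumed to return the previous writer
	defer Redirect(old)

	fn()

	got := buf.String()
	for _, w := range want {
		if !strings.Contains(got, w) {
			t.Errorf("%q does not appear in the output:\n%s", w, got)
		}
	}
}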
func export(args *docopt.Args) {
	log := log15.New()

	log.Info("decoding manifest")
	var manifest map[string]*ct.Artifact
	if err := cliutil.DecodeJSONArg(args.String["<manifest>"], &manifest); err != nil {
		log.Error("error decoding manifest", "err", err)
		os.Exit(1)
	}

	exporter := Exporter{
		dir: args.String["<dir>"],
		log: log15.New(),
	}

	log.Info(fmt.Sprintf("exporting %d images to %s", len(manifest), exporter.dir))
	if err := exporter.Export(manifest); err != nil {
		log.Error("error exporting images", "err", err)
		os.Exit(1)
	}
}
func main() {
	f, err := os.OpenFile("dclog.txt", os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0777)
	defer f.Close()
	//log.SetOutput(f)
	log.SetOutput(io.MultiWriter(f, os.Stdout))
	log.SetFlags(log.LstdFlags | log.Lshortfile)

	txType := "ChangeHost"
	txTime := "1427383713"
	userId := []byte("2")
	var blockId int64 = 1288

	var txSlice [][]byte
	// hash
	txSlice = append(txSlice, []byte("22cb812e53e22ee539af4a1d39b4596d"))
	// type
	txSlice = append(txSlice, utils.Int64ToByte(utils.TypeInt(txType)))
	// time
	txSlice = append(txSlice, []byte(txTime))
	// user_id
	txSlice = append(txSlice, userId)
	// promised_amount_id
	txSlice = append(txSlice, []byte(`http://sffs.ru/`))
	// sign
	txSlice = append(txSlice, []byte("11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111"))

	blockData := new(utils.BlockData)
	blockData.BlockId = blockId
	blockData.Time = utils.StrToInt64(txTime)
	blockData.UserId = utils.BytesToInt64(userId)

	dir, err := utils.GetCurrentDir()
	if err != nil {
		log.Error("%v", utils.ErrInfo(err))
	}
	configIni_, err := config.NewConfig("ini", dir+"/config.ini")
	if err != nil {
		fmt.Println(err)
	}
	configIni, err := configIni_.GetSection("default")
	db := utils.DbConnect(configIni)

	// take a snapshot of the DB as hashes before starting the tests
	hashesStart, err := tests_utils.AllHashes(db)

	err = tests_utils.MakeTest(txSlice, blockData, txType, hashesStart, db, "work_and_rollback")
	if err != nil {
		fmt.Println(err)
	}
}
func main() {
	conf := &terraformer.Config{}

	mc := multiconfig.New()
	mc.Loader = multiconfig.MultiLoader(
		&multiconfig.TagLoader{},
		&multiconfig.EnvironmentLoader{},
		&multiconfig.EnvironmentLoader{Prefix: "KONFIG_TERRAFORMER"},
		&multiconfig.FlagLoader{},
	)
	mc.MustLoad(conf)

	if !conf.TerraformDebug {
		// hashicorp.terraform outputs many logs, discard them
		log.SetOutput(ioutil.Discard)
	}

	log := logging.NewCustom(terraformer.Name, conf.Debug)

	// init terraformer
	t, err := terraformer.New(conf, log)
	if err != nil {
		log.Fatal(err.Error())
	}

	k, err := terraformer.NewKite(t, conf)
	if err != nil {
		log.Fatal(err.Error())
	}

	if err := k.RegisterForever(k.RegisterURL(true)); err != nil {
		log.Fatal(err.Error())
	}

	go k.Run()
	<-k.ServerReadyNotify()
	log.Debug("Kite Started Listening")

	// terraformer can only be closed with signals, wait for any signal
	if err := t.Wait(); err != nil {
		log.Error("Err after waiting terraformer %s", err)
	}

	k.Close()
}
func cli_db(fn func(*Db) error) func(*cli.Context) {
	return func(c *cli.Context) {
		env := cmd.Env(c)
		log := logging.Open(env)
		db, err := Open(env)
		if err == nil {
			db.Logger = log
			err = fn(db)
		}
		if err == nil {
			log.Info("Done!")
		} else {
			log.Error(err.Error())
		}
	}
}
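// cli_db wraps each command body in the shared open/run/report plumbing. A
// minimal, self-contained sketch of the same wrap-the-action pattern using
// only the standard library; the names here (withDB, DB, openDB) are
// hypothetical and not part of the package above. Assumes the standard
// errors and log packages.
type DB struct{}

func openDB(dsn string) (*DB, error) {
	if dsn == "" {
		return nil, errors.New("empty dsn")
	}
	return &DB{}, nil
}

// withDB opens the database, hands it to fn, and logs the outcome, so each
// command only has to implement fn.
func withDB(dsn string, fn func(*DB) error) {
	db, err := openDB(dsn)
	if err == nil {
		err = fn(db)
	}
	if err == nil {
		log.Println("Done!")
	} else {
		log.Println("error:", err)
	}
}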
// readPump pumps messages from the websocket connection to the hub.
func (c *connection) readPump() {
	defer func() {
		h.unregister <- c
		c.ws.Close()
	}()
	c.ws.SetReadLimit(maxMessageSize)
	c.ws.SetReadDeadline(time.Now().Add(readWait))

	welcomeMsg := &WelcomeMsg{}
	welcomeb, err := welcomeMsg.Marshal(c.SessionId)
	if err != nil {
		log.Error("could not create welcome message: %s", err)
		return
	}
	c.send <- welcomeb

	for {
		op, r, err := c.ws.NextReader()
		if err != nil {
			break
		}
		switch op {
		case websocket.OpPong:
			c.ws.SetReadDeadline(time.Now().Add(readWait))
		case websocket.OpText:
			message, err := ioutil.ReadAll(r)
			if err != nil {
				break
			}
			var data []interface{}
			err = json.Unmarshal(message, &data)
			if err != nil {
				break
			}
			c.handleMessage(data)
		}
	}
}
// Run as pid 1 and monitor the contained process to return its exit code.
func containerInitApp(c *Config, logFile *os.File) error {
	log := logger.New("fn", "containerInitApp")

	init := newContainerInit(c, logFile)
	log.Info("registering RPC server")
	if err := rpcplus.Register(init); err != nil {
		log.Error("error registering RPC server", "err", err)
		return err
	}
	init.mtx.Lock()
	defer init.mtx.Unlock()

	// Prepare the cmd based on the given args
	// If this fails we report that below
	cmdPath, cmdErr := getCmdPath(c)
	cmd := exec.Command(cmdPath, c.Args[1:]...)
	cmd.Dir = c.WorkDir

	cmd.Env = make([]string, 0, len(c.Env))
	for k, v := range c.Env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}

	// App runs in its own session
	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}

	// Console setup. Hook up the container app's stdin/stdout/stderr to
	// either a pty or pipes. The FDs for the controlling side of the
	// pty/pipes will be passed to flynn-host later via a UNIX socket.
	if c.TTY {
		log.Info("creating PTY")
		ptyMaster, ptySlave, err := pty.Open()
		if err != nil {
			log.Info("error creating PTY", "err", err)
			return err
		}
		init.ptyMaster = ptyMaster
		cmd.Stdout = ptySlave
		cmd.Stderr = ptySlave
		if c.OpenStdin {
			log.Info("attaching stdin to PTY")
			cmd.Stdin = ptySlave
			cmd.SysProcAttr.Setctty = true
		}
	} else {
		// we use syscall.Socketpair (rather than cmd.StdoutPipe) to make it easier
		// for flynn-host to do non-blocking I/O (via net.FileConn) so that no
		// read(2) calls can succeed after closing the logs during an update.
		log.Info("creating stdout pipe")
		var err error
		cmd.Stdout, init.stdout, err = newSocketPair("stdout")
		if err != nil {
			log.Error("error creating stdout pipe", "err", err)
			return err
		}

		log.Info("creating stderr pipe")
		cmd.Stderr, init.stderr, err = newSocketPair("stderr")
		if err != nil {
			log.Error("error creating stderr pipe", "err", err)
			return err
		}

		log.Info("creating FD proxies")
		if err := createFDProxies(cmd); err != nil {
			log.Error("error creating FD proxies", "err", err)
			return err
		}

		if c.OpenStdin {
			// Can't use cmd.StdinPipe() here, since in Go 1.2 it
			// returns an io.WriteCloser with the underlying object
			// being an *exec.closeOnce, neither of which provides
			// a way to convert to an FD.
			log.Info("creating stdin pipe")
			pipeRead, pipeWrite, err := os.Pipe()
			if err != nil {
				log.Error("creating stdin pipe", "err", err)
				return err
			}
			cmd.Stdin = pipeRead
			init.stdin = pipeWrite
		}
	}

	go runRPCServer()

	// Wait for flynn-host to tell us to start
	init.mtx.Unlock() // Allow calls
	log.Info("waiting to be resumed")
	<-init.resume
	log.Info("resuming")
	init.mtx.Lock()

	if cmdErr != nil {
		log.Error("command failed", "err", cmdErr)
		init.changeState(StateFailed, cmdErr.Error(), -1)
		init.exit(1)
	}

	// Container setup
	log.Info("setting up the container")
	if err := setupCommon(c, log); err != nil {
		log.Error("error setting up the container", "err", err)
		init.changeState(StateFailed, err.Error(), -1)
		init.exit(1)
	}

	// Start the app
	log.Info("starting the command")
	if err := cmd.Start(); err != nil {
		log.Error("error starting the command", "err", err)
		init.changeState(StateFailed, err.Error(), -1)
		init.exit(1)
	}
	log.Info("setting state to running")
	init.process = cmd.Process
	init.changeState(StateRunning, "", -1)

	init.mtx.Unlock() // Allow calls

	// monitor services
	hbs := make([]discoverd.Heartbeater, 0, len(c.Ports))
	for _, port := range c.Ports {
		if port.Service == nil {
			continue
		}
		log = log.New("service", port.Service.Name, "port", port.Port, "proto", port.Proto)
		log.Info("monitoring service")
		hb, err := monitor(port, init, c.Env, log)
		if err != nil {
			log.Error("error monitoring service", "err", err)
			os.Exit(70)
		}
		hbs = append(hbs, hb)
	}

	exitCode := babySit(init.process)
	log.Info("command exited", "status", exitCode)
	init.mtx.Lock()
	for _, hb := range hbs {
		hb.Close()
	}
	init.changeState(StateExited, "", exitCode)
	init.mtx.Unlock() // Allow calls

	log.Info("exiting")
	init.exit(exitCode)
	return nil
}
func runDaemon(args *docopt.Args) {
	hostname, _ := os.Hostname()
	httpPort := args.String["--http-port"]
	externalIP := args.String["--external-ip"]
	listenIP := args.String["--listen-ip"]
	stateFile := args.String["--state"]
	hostID := args.String["--id"]
	tags := parseTagArgs(args.String["--tags"])
	force := args.Bool["--force"]
	volPath := args.String["--volpath"]
	volProvider := args.String["--vol-provider"]
	backendName := args.String["--backend"]
	flynnInit := args.String["--flynn-init"]
	logDir := args.String["--log-dir"]
	discoveryToken := args.String["--discovery"]
	bridgeName := args.String["--bridge-name"]

	logger, err := setupLogger(logDir)
	if err != nil {
		shutdown.Fatalf("error setting up logger: %s", err)
	}

	var peerIPs []string
	if args.String["--peer-ips"] != "" {
		peerIPs = strings.Split(args.String["--peer-ips"], ",")
	}

	if hostID == "" {
		hostID = strings.Replace(hostname, "-", "", -1)
	}

	var maxJobConcurrency uint64 = 4
	if m, err := strconv.ParseUint(args.String["--max-job-concurrency"], 10, 64); err == nil {
		maxJobConcurrency = m
	}

	var partitionCGroups = make(map[string]int64) // name -> cpu shares
	for _, p := range strings.Split(args.String["--partitions"], " ") {
		nameShares := strings.Split(p, "=cpu_shares:")
		if len(nameShares) != 2 {
			shutdown.Fatalf("invalid partition specifier: %q", p)
		}
		shares, err := strconv.ParseInt(nameShares[1], 10, 64)
		if err != nil || shares < 2 {
			shutdown.Fatalf("invalid cpu shares specifier: %q", nameShares[1])
		}
		partitionCGroups[nameShares[0]] = shares
	}
	for _, s := range []string{"user", "system", "background"} {
		if _, ok := partitionCGroups[s]; !ok {
			shutdown.Fatalf("missing mandatory resource partition: %s", s)
		}
	}

	log := logger.New("fn", "runDaemon", "host.id", hostID)
	log.Info("starting daemon")

	log.Info("validating host ID")
	if strings.Contains(hostID, "-") {
		shutdown.Fatal("host id must not contain dashes")
	}
	if externalIP == "" {
		log.Info("detecting external IP")
		var err error
		externalIP, err = config.DefaultExternalIP()
		if err != nil {
			log.Error("error detecting external IP", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("using external IP " + externalIP)
	}

	publishAddr := net.JoinHostPort(externalIP, httpPort)
	if discoveryToken != "" {
		// TODO: retry
		log.Info("registering with cluster discovery service", "token", discoveryToken, "addr", publishAddr, "name", hostID)
		discoveryID, err := discovery.RegisterInstance(discovery.Info{
			ClusterURL:  discoveryToken,
			InstanceURL: "http://" + publishAddr,
			Name:        hostID,
		})
		if err != nil {
			log.Error("error registering with cluster discovery service", "err", err)
			shutdown.Fatal(err)
		}
		log.Info("registered with cluster discovery service", "id", discoveryID)
	}

	state := NewState(hostID, stateFile)
	shutdown.BeforeExit(func() { state.CloseDB() })

	log.Info("initializing volume manager", "provider", volProvider)
	var newVolProvider func() (volume.Provider, error)
	switch volProvider {
	case "zfs":
		newVolProvider = func() (volume.Provider, error) {
			// use a zpool backing file size of either 70% of the device on which
			// volumes will reside, or 100GB if that can't be determined.
			log.Info("determining ZFS zpool size")
			var size int64
			var dev syscall.Statfs_t
			if err := syscall.Statfs(volPath, &dev); err == nil {
				size = (dev.Bsize * int64(dev.Blocks) * 7) / 10
			} else {
				size = 100000000000
			}
			log.Info(fmt.Sprintf("using ZFS zpool size %d", size))

			return zfsVolume.NewProvider(&zfsVolume.ProviderConfig{
				DatasetName: "flynn-default",
				Make: &zfsVolume.MakeDev{
					BackingFilename: filepath.Join(volPath, "zfs/vdev/flynn-default-zpool.vdev"),
					Size:            size,
				},
				WorkingDir: filepath.Join(volPath, "zfs"),
			})
		}
	case "mock":
		newVolProvider = func() (volume.Provider, error) { return nil, nil }
	default:
		shutdown.Fatalf("unknown volume provider: %q", volProvider)
	}
	vman := volumemanager.New(
		filepath.Join(volPath, "volumes.bolt"),
		newVolProvider,
	)
	shutdown.BeforeExit(func() { vman.CloseDB() })

	mux := logmux.New(hostID, logDir, logger.New("host.id", hostID, "component", "logmux"))

	log.Info("initializing job backend", "type", backendName)
	var backend Backend
	switch backendName {
	case "libcontainer":
		backend, err = NewLibcontainerBackend(state, vman, bridgeName, flynnInit, mux, partitionCGroups, logger.New("host.id", hostID, "component", "backend", "backend", "libcontainer"))
	case "mock":
		backend = MockBackend{}
	default:
		shutdown.Fatalf("unknown backend %q", backendName)
	}
	if err != nil {
		shutdown.Fatal(err)
	}
	backend.SetDefaultEnv("EXTERNAL_IP", externalIP)
	backend.SetDefaultEnv("LISTEN_IP", listenIP)

	var buffers host.LogBuffers
	discoverdManager := NewDiscoverdManager(backend, mux, hostID, publishAddr, tags)
	publishURL := "http://" + publishAddr
	host := &Host{
		id:  hostID,
		url: publishURL,
		status: &host.HostStatus{
			ID:      hostID,
			PID:     os.Getpid(),
			URL:     publishURL,
			Tags:    tags,
			Version: version.String(),
		},
		state:             state,
		backend:           backend,
		vman:              vman,
		discMan:           discoverdManager,
		log:               logger.New("host.id", hostID),
		maxJobConcurrency: maxJobConcurrency,
	}
	backend.SetHost(host)

	// restore the host status if set in the environment
	if statusEnv := os.Getenv("FLYNN_HOST_STATUS"); statusEnv != "" {
		log.Info("restoring host status from parent")
		if err := json.Unmarshal([]byte(statusEnv), &host.status); err != nil {
			log.Error("error restoring host status from parent", "err", err)
			shutdown.Fatal(err)
		}
		pid := os.Getpid()
		log.Info("setting status PID", "pid", pid)
		host.status.PID = pid
		// keep the same tags as the parent
		discoverdManager.UpdateTags(host.status.Tags)
	}

	log.Info("creating HTTP listener")
	l, err := newHTTPListener(net.JoinHostPort(listenIP, httpPort))
	if err != nil {
		log.Error("error creating HTTP listener", "err", err)
		shutdown.Fatal(err)
	}
	host.listener = l
	shutdown.BeforeExit(func() { host.Close() })

	// if we have a control socket FD, wait for a "resume" message before
	// opening state DBs and serving requests.
	var controlFD int
	if fdEnv := os.Getenv("FLYNN_CONTROL_FD"); fdEnv != "" {
		log.Info("parsing control socket file descriptor")
		controlFD, err = strconv.Atoi(fdEnv)
		if err != nil {
			log.Error("error parsing control socket file descriptor", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("waiting for resume message from parent")
		msg := make([]byte, len(ControlMsgResume))
		if _, err := syscall.Read(controlFD, msg); err != nil {
			log.Error("error waiting for resume message from parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("validating resume message")
		if !bytes.Equal(msg, ControlMsgResume) {
			log.Error(fmt.Sprintf("unexpected resume message from parent: %v", msg))
			shutdown.ExitWithCode(1)
		}

		log.Info("receiving log buffers from parent")
		if err := json.NewDecoder(&controlSock{controlFD}).Decode(&buffers); err != nil {
			log.Error("error receiving log buffers from parent", "err", err)
			shutdown.Fatal(err)
		}
	}

	log.Info("opening state databases")
	if err := host.OpenDBs(); err != nil {
		log.Error("error opening state databases", "err", err)
		shutdown.Fatal(err)
	}

	// stopJobs stops all jobs, leaving discoverd until the end so other
	// jobs can unregister themselves on shutdown.
	stopJobs := func() (err error) {
		var except []string
		host.statusMtx.RLock()
		if host.status.Discoverd != nil && host.status.Discoverd.JobID != "" {
			except = []string{host.status.Discoverd.JobID}
		}
		host.statusMtx.RUnlock()
		log.Info("stopping all jobs except discoverd")
		if err := backend.Cleanup(except); err != nil {
			log.Error("error stopping all jobs except discoverd", "err", err)
			return err
		}
		for _, id := range except {
			log.Info("stopping discoverd")
			if e := backend.Stop(id); e != nil {
				log.Error("error stopping discoverd", "err", e)
				err = e
			}
		}
		return
	}

	log.Info("restoring state")
	resurrect, err := state.Restore(backend, buffers)
	if err != nil {
		log.Error("error restoring state", "err", err)
		shutdown.Fatal(err)
	}

	shutdown.BeforeExit(func() {
		// close discoverd before stopping jobs so we can unregister first
		log.Info("unregistering with service discovery")
		if err := discoverdManager.Close(); err != nil {
			log.Error("error unregistering with service discovery", "err", err)
		}
		stopJobs()
	})

	log.Info("serving HTTP requests")
	host.ServeHTTP()

	if controlFD > 0 {
		// now that we are serving requests, send an "ok" message to the parent
		log.Info("sending ok message to parent")
		if _, err := syscall.Write(controlFD, ControlMsgOK); err != nil {
			log.Error("error sending ok message to parent", "err", err)
			shutdown.Fatal(err)
		}

		log.Info("closing control socket")
		if err := syscall.Close(controlFD); err != nil {
			log.Error("error closing control socket", "err", err)
		}
	}

	if force {
		log.Info("forcibly stopping existing jobs")
		if err := stopJobs(); err != nil {
			log.Error("error forcibly stopping existing jobs", "err", err)
			shutdown.Fatal(err)
		}
	}

	if discoveryToken != "" {
		log.Info("getting cluster peer IPs")
		instances, err := discovery.GetCluster(discoveryToken)
		if err != nil {
			// TODO(titanous): retry?
			log.Error("error getting discovery cluster", "err", err)
			shutdown.Fatal(err)
		}
		peerIPs = make([]string, 0, len(instances))
		for _, inst := range instances {
			u, err := url.Parse(inst.URL)
			if err != nil {
				continue
			}
			ip, _, err := net.SplitHostPort(u.Host)
			if err != nil || ip == externalIP {
				continue
			}
			peerIPs = append(peerIPs, ip)
		}
		log.Info("got cluster peer IPs", "peers", peerIPs)
	}

	log.Info("connecting to cluster peers")
	if err := discoverdManager.ConnectPeer(peerIPs); err != nil {
		log.Info("no cluster peers available")
	}

	if !args.Bool["--no-resurrect"] {
		log.Info("resurrecting jobs")
		resurrect()
	}

	monitor := NewMonitor(host.discMan, externalIP, logger)
	shutdown.BeforeExit(func() { monitor.Shutdown() })
	go monitor.Run()

	log.Info("blocking main goroutine")
	<-make(chan struct{})
}
// Private method which handles behavior for wait for response for daemon and non-daemon modes.
func waitHandling(p pluginExecutor, timeout time.Duration, logpath string) (*Response, error) {
	log := execLogger.WithField("_block", "waitHandling")
	/* Bit of complex behavior so some notes:

	A. We need to wait for three scenarios depending on the daemon setting
		1) plugin is killed (like a safe exit in non-daemon) causing WaitForExit to fire
		2) plugin timeout fires calling Kill() and causing WaitForExit to fire
		3) A response is returned before either 1 or 2 occur

		notes:
		* In daemon mode (daemon == true) we want to wait until (1) or (2 then 1) or (3)
		  occurs and stop waiting right after.
		* In non-daemon mode (daemon == false) we want to return on (1) or (2 then 1)
		  regardless of whether (3) occurs before or after.

	B. We will start three go routines to handle
		1) waiting for timeout, on timeout we signal timeout and then kill plugin
		2) wait for exit, also known as wait for kill, on kill we fire proper code to waitChannel
		3) wait for response, on response we fire proper code to waitChannel

	C. The wait behavior loops collecting
		1) timeout signal, this is used to mark exit by timeout
		2) killed signal, signal the plugin has stopped - this exits the loop for all scenarios
		3) response received, signal the plugin has responded - this exits the loop if
		   daemon == true, otherwise waits for (2)
		4) response received but corrupt
	*/

	// wait channel
	waitChannel := make(chan waitSignalValue, 3)

	// send timeout signal to our channel on timeout
	log.Debug("timeout chan start")
	go waitForPluginTimeout(timeout, p, waitChannel)

	// send response received signal to our channel on response
	log.Debug("response chan start")
	go waitForResponseFromPlugin(p.ResponseReader(), waitChannel, logpath)

	// log stderr from the plugin
	go logStdErr(p.ErrorResponseReader(), logpath)

	// send killed plugin signal to our channel on kill
	log.Debug("kill chan start")
	go waitForKilledPlugin(p, waitChannel)

	// flag to indicate a timeout occurred
	var timeoutFlag bool
	// error value indicating a bad response was found
	var errResponse *error
	// var holding a good response (or nil if none was returned)
	var response *Response

	// Loop to wait for signals and return
	for {
		w := <-waitChannel
		switch w.Signal {

		case pluginTimeout: // plugin timeout signal received
			log.Debug("plugin timeout signal received")
			// If timeout received after response we are ok with it and
			// don't need to flip the timeout flag.
			if response == nil {
				log.Debug("timeout flag set")
				// We got a timeout without getting a response
				// set the flag
				timeoutFlag = true
				// Kill the plugin.
				p.Kill()
				break
			}
			log.Debug("timeout flag ignored because of response")

		case pluginKilled: // plugin killed signal received
			log.Error("plugin kill signal received")
			// We check a few scenarios and return based on how things worked out to this point
			// 1) If a bad response was received we return signalling this with an error (fail)
			if errResponse != nil {
				log.Error("returning with error (bad response)")
				return nil, *errResponse
			}
			// 2) If a timeout occurred we return that as error (fail)
			if timeoutFlag {
				log.Error("returning with error (timeout)")
				return nil, errors.New("timeout waiting for response")
			}
			// 3) If a good response was returned we return that with no error (success)
			if response != nil {
				log.Error("returning with response (after wait for kill)")
				return response, nil
			}
			// 4) otherwise we return no response and an error that no response was received (fail)
			log.Error("returning with error (killed without response)")
			// The kill could have been without error so we check if ExitError was returned and return
			// our own if not.
			if *w.Error != nil {
				return nil, *w.Error
			} else {
				return nil, errors.New("plugin died without sending response")
			}

		case pluginResponseOk: // plugin response (valid) signal received
			log.Debug("plugin response (ok) received")
			// If in daemon mode we can return now (success) since the plugin will continue to run;
			// if not we let the loop continue (to wait for kill)
			response = w.Response
			return response, nil

		case pluginResponseBad: // plugin response (invalid) signal received
			log.Error("plugin response (bad) received")
			// A bad response is end of game in all scenarios and indicative of an unhealthy or unsupported plugin
			// We save the response bad error var (for handling later on plugin kill)
			errResponse = w.Error
		}
	}
}
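// The comment block in waitHandling describes more coordination than the code
// shows at a glance, so here is a stripped-down, self-contained model of the
// pattern: three watcher goroutines (timeout, response, kill) all report to
// one buffered channel and a single loop decides what to return. All names
// here (waitSig, waitMsg, waitForPlugin) are hypothetical and only illustrate
// the shape of the logic; this is not the snap implementation. Assumes only
// the standard errors and time packages.
type waitSig int

const (
	sigTimeout waitSig = iota
	sigKilled
	sigResponse
)

type waitMsg struct {
	sig  waitSig
	resp string
	err  error
}

// waitForPlugin models the daemon-mode path: return as soon as a good
// response arrives, otherwise wait for the kill signal and report either a
// timeout or a "died without response" error.
func waitForPlugin(timeout time.Duration, responses <-chan string, killed <-chan error, kill func()) (string, error) {
	ch := make(chan waitMsg, 3) // buffered so no watcher goroutine blocks

	go func() { // timeout watcher: signal the timeout, then kill the plugin
		time.Sleep(timeout)
		ch <- waitMsg{sig: sigTimeout}
		kill()
	}()
	go func() { // response watcher
		if r, ok := <-responses; ok {
			ch <- waitMsg{sig: sigResponse, resp: r}
		}
	}()
	go func() { // kill watcher
		err := <-killed
		ch <- waitMsg{sig: sigKilled, err: err}
	}()

	var timedOut bool
	for {
		m := <-ch
		switch m.sig {
		case sigTimeout:
			timedOut = true
		case sigResponse:
			return m.resp, nil
		case sigKilled:
			if timedOut {
				return "", errors.New("timeout waiting for response")
			}
			if m.err != nil {
				return "", m.err
			}
			return "", errors.New("plugin died without sending response")
		}
	}
}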
// Run as pid 1 and monitor the contained process to return its exit code.
func containerInitApp(c *Config, logFile *os.File) error {
	log := logger.New()

	init := newContainerInit(c, logFile)
	log.Debug("registering RPC server")
	if err := rpcplus.Register(init); err != nil {
		log.Error("error registering RPC server", "err", err)
		return err
	}
	init.mtx.Lock()
	defer init.mtx.Unlock()

	// Prepare the cmd based on the given args
	// If this fails we report that below
	cmdPath, cmdErr := getCmdPath(c)
	cmd := exec.Command(cmdPath, c.Args[1:]...)
	cmd.Dir = c.WorkDir

	cmd.Env = make([]string, 0, len(c.Env))
	for k, v := range c.Env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}

	// App runs in its own session
	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}

	if c.Uid != nil || c.Gid != nil {
		cmd.SysProcAttr.Credential = &syscall.Credential{}
		if c.Uid != nil {
			cmd.SysProcAttr.Credential.Uid = *c.Uid
		}
		if c.Gid != nil {
			cmd.SysProcAttr.Credential.Gid = *c.Gid
		}
	}

	// Console setup. Hook up the container app's stdin/stdout/stderr to
	// either a pty or pipes. The FDs for the controlling side of the
	// pty/pipes will be passed to flynn-host later via a UNIX socket.
	if c.TTY {
		log.Debug("creating PTY")
		ptyMaster, ptySlave, err := pty.Open()
		if err != nil {
			log.Error("error creating PTY", "err", err)
			return err
		}
		init.ptyMaster = ptyMaster
		cmd.Stdout = ptySlave
		cmd.Stderr = ptySlave
		if c.OpenStdin {
			log.Debug("attaching stdin to PTY")
			cmd.Stdin = ptySlave
			cmd.SysProcAttr.Setctty = true
		}
		if c.Uid != nil && c.Gid != nil {
			if err := syscall.Fchown(int(ptySlave.Fd()), int(*c.Uid), int(*c.Gid)); err != nil {
				log.Error("error changing PTY ownership", "err", err)
				return err
			}
		}
	} else {
		// We copy through a socketpair (rather than using cmd.StdoutPipe directly) to make
		// it easier for flynn-host to do non-blocking I/O (via net.FileConn) so that no
		// read(2) calls can succeed after closing the logs during an update.
		//
		// We also don't assign the socketpair directly to fd 1 because that prevents jobs
		// using /dev/stdout (calling open(2) on a socket leads to an ENXIO error, see
		// http://marc.info/?l=ast-users&m=120978595414993).
		newPipe := func(pipeFn func() (io.ReadCloser, error), name string) (*os.File, error) {
			pipe, err := pipeFn()
			if err != nil {
				return nil, err
			}
			if c.Uid != nil && c.Gid != nil {
				if err := syscall.Fchown(int(pipe.(*os.File).Fd()), int(*c.Uid), int(*c.Gid)); err != nil {
					return nil, err
				}
			}
			sockR, sockW, err := newSocketPair(name)
			if err != nil {
				return nil, err
			}
			go func() {
				defer sockW.Close()
				for {
					// copy data from the pipe to the socket using splice(2)
					// (rather than io.Copy) to avoid a needless copy through
					// user space
					n, err := syscall.Splice(int(pipe.(*os.File).Fd()), nil, int(sockW.Fd()), nil, 65535, 0)
					if err != nil || n == 0 {
						return
					}
				}
			}()
			return sockR, nil
		}

		log.Debug("creating stdout pipe")
		var err error
		init.stdout, err = newPipe(cmd.StdoutPipe, "stdout")
		if err != nil {
			log.Error("error creating stdout pipe", "err", err)
			return err
		}

		log.Debug("creating stderr pipe")
		init.stderr, err = newPipe(cmd.StderrPipe, "stderr")
		if err != nil {
			log.Error("error creating stderr pipe", "err", err)
			return err
		}

		if c.OpenStdin {
			// Can't use cmd.StdinPipe() here, since in Go 1.2 it
			// returns an io.WriteCloser with the underlying object
			// being an *exec.closeOnce, neither of which provides
			// a way to convert to an FD.
			log.Debug("creating stdin pipe")
			pipeRead, pipeWrite, err := os.Pipe()
			if err != nil {
				log.Error("creating stdin pipe", "err", err)
				return err
			}
			cmd.Stdin = pipeRead
			init.stdin = pipeWrite
		}
	}

	go runRPCServer()

	// Wait for flynn-host to tell us to start
	init.mtx.Unlock() // Allow calls
	log.Debug("waiting to be resumed")
	<-init.resume
	log.Debug("resuming")
	init.mtx.Lock()

	log.Info("starting the job", "args", cmd.Args)
	if cmdErr != nil {
		log.Error("error starting the job", "err", cmdErr)
		init.changeState(StateFailed, cmdErr.Error(), -1)
		init.exit(1)
	}
	if err := cmd.Start(); err != nil {
		log.Error("error starting the job", "err", err)
		init.changeState(StateFailed, err.Error(), -1)
		init.exit(1)
	}
	log.Debug("setting state to running")
	init.process = cmd.Process
	init.changeState(StateRunning, "", -1)

	init.mtx.Unlock() // Allow calls

	// monitor services
	hbs := make([]discoverd.Heartbeater, 0, len(c.Ports))
	for _, port := range c.Ports {
		if port.Service == nil {
			continue
		}
		log := log.New("name", port.Service.Name, "port", port.Port, "proto", port.Proto)
		log.Info("monitoring service")
		hb, err := monitor(port, init, c.Env, log)
		if err != nil {
			log.Error("error monitoring service", "err", err)
			os.Exit(70)
		}
		hbs = append(hbs, hb)
	}

	exitCode := babySit(init, hbs)
	log.Info("job exited", "status", exitCode)
	init.mtx.Lock()
	init.changeState(StateExited, "", exitCode)
	init.mtx.Unlock() // Allow calls

	log.Info("exiting")
	init.exit(exitCode)
	return nil
}
func babySit(init *ContainerInit, hbs []discoverd.Heartbeater) int {
	log := logger.New()

	var shutdownOnce sync.Once
	hbDone := make(chan struct{})
	closeHBs := func() {
		for _, hb := range hbs {
			if err := hb.Close(); err != nil {
				log.Error("error deregistering service", "addr", hb.Addr(), "err", err)
			} else {
				log.Info("service deregistered", "addr", hb.Addr())
			}
		}
		close(hbDone)
	}

	// Close the heartbeaters if requested to do so
	go func() {
		<-init.deregister
		log.Info("received deregister request")
		shutdownOnce.Do(closeHBs)
	}()

	// Forward all signals to the app
	sigchan := make(chan os.Signal, 1)
	sigutil.CatchAll(sigchan)
	go func() {
		for sig := range sigchan {
			log.Info("received signal", "type", sig)
			if sig == syscall.SIGCHLD {
				continue
			}
			if sig == syscall.SIGTERM || sig == syscall.SIGINT {
				shutdownOnce.Do(closeHBs)
			}
			log.Info("forwarding signal to job", "type", sig)
			init.process.Signal(sig)
		}
	}()

	// Wait for the app to exit. Also, as pid 1 it's our job to reap all
	// orphaned zombies.
	var wstatus syscall.WaitStatus
	for {
		pid, err := syscall.Wait4(-1, &wstatus, 0, nil)
		if err == nil && pid == init.process.Pid {
			break
		}
	}

	// Ensure that the heartbeaters are closed even if the app wasn't signaled
	shutdownOnce.Do(closeHBs)
	select {
	case <-hbDone:
	case <-time.After(5 * time.Second):
		log.Error("timed out waiting for services to be deregistered")
	}

	if wstatus.Signaled() {
		log.Debug("job exited due to signal")
		return 0
	}
	return wstatus.ExitStatus()
}
func ExampleLogger_SkipStackFrames() {
	log := say.NewLogger(say.SkipStackFrames(-1)) // Disable stack traces.
	log.Error("Oops")

	// Output:
	// ERROR Oops
}