Example #1
func (c *ContainerInit) StreamState(arg struct{}, stream rpcplus.Stream) error {
	log := logger.New("fn", "StreamState")
	log.Debug("starting to stream state")

	ch := make(chan StateChange)
	c.streamsMtx.Lock()
	c.mtx.Lock()
	select {
	case stream.Send <- StateChange{State: c.state, Error: c.error, ExitStatus: c.exitStatus}:
		log.Debug("sent initial state")
	case <-stream.Error:
		c.mtx.Unlock()
		c.streamsMtx.Unlock()
		return nil
	}
	c.mtx.Unlock()
	c.streams[ch] = struct{}{}
	c.streamsMtx.Unlock()
	defer func() {
		log.Debug("cleanup")
		go func() {
			// drain to prevent deadlock while removing the listener
			for range ch {
			}
		}()
		c.streamsMtx.Lock()
		delete(c.streams, ch)
		c.streamsMtx.Unlock()
		close(ch)
	}()

	log.Debug("waiting for state changes")
	for {
		select {
		case change := <-ch:
			select {
			case stream.Send <- change:
				log.Debug("sent state change", "state", change.State)
			case <-stream.Error:
				return nil
			}
		case <-stream.Error:
			return nil
		}
	}
}
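StreamState illustrates a common Go fan-out pattern: each subscriber gets its own channel, registered in a mutex-protected map, and removal first drains the channel in a goroutine so a concurrent broadcast blocked on a send cannot deadlock. A minimal, self-contained sketch of that pattern (the broadcaster type and names are illustrative, not from the original package):

package main

import (
	"fmt"
	"sync"
)

// broadcaster fans events out to any number of subscriber channels.
type broadcaster struct {
	mtx  sync.Mutex
	subs map[chan string]struct{}
}

func newBroadcaster() *broadcaster {
	return &broadcaster{subs: make(map[chan string]struct{})}
}

// subscribe registers a new channel and returns it with a cancel function.
func (b *broadcaster) subscribe() (<-chan string, func()) {
	ch := make(chan string)
	b.mtx.Lock()
	b.subs[ch] = struct{}{}
	b.mtx.Unlock()

	cancel := func() {
		// Drain in the background so a publish that is blocked sending to
		// this channel cannot deadlock while we take the lock to remove it.
		go func() {
			for range ch {
			}
		}()
		b.mtx.Lock()
		delete(b.subs, ch)
		b.mtx.Unlock()
		close(ch)
	}
	return ch, cancel
}

// publish sends an event to every registered subscriber.
func (b *broadcaster) publish(event string) {
	b.mtx.Lock()
	defer b.mtx.Unlock()
	for ch := range b.subs {
		ch <- event
	}
}

func main() {
	b := newBroadcaster()
	ch, cancel := b.subscribe()
	go b.publish("state: running")
	fmt.Println(<-ch)
	cancel()
}

Closing the subscriber channel only after it has been removed from the map guarantees the broadcaster never sends on a closed channel.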
Example #2
func RunLogBenchmarkFixedString(b *testing.B) {
	log := MustGetLogger("test")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		log.Debug("some random fixed text")
	}
}
Example #3
func RunLogBenchmark(b *testing.B) {
	password := Password("foo")
	log := MustGetLogger("test")

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		log.Debug("log line for %d and this is rectified: %s", i, password)
	}
}
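Both helpers take a *testing.B, so they only run when a Benchmark-prefixed function hands them one. Assuming they live in a _test.go file of a package named logging, thin wrappers like the following (wrapper names are illustrative) make them discoverable by go test -bench=.:

package logging

import "testing"

// Wrappers with the required BenchmarkXxx naming so the helpers above are
// picked up by `go test -bench=.`.
func BenchmarkLogFixedString(b *testing.B) {
	RunLogBenchmarkFixedString(b)
}

func BenchmarkLogFormatted(b *testing.B) {
	RunLogBenchmark(b)
}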
Example #4
func main() {
	http.HandleFunc("/", sayHelloName)
	http.HandleFunc("/login", login)
	http.HandleFunc("/upload", upload)
	http.HandleFunc("/view", view)
	http.HandleFunc("/list", view)
	err := http.ListenAndServe(":80", nil)
	if err != nil {
		log.Fatal("ListenAndS erve:", err)
		log.Debug("fancy")
	}
}
Example #5
func main() {
	conf := &terraformer.Config{}

	mc := multiconfig.New()
	mc.Loader = multiconfig.MultiLoader(
		&multiconfig.TagLoader{},
		&multiconfig.EnvironmentLoader{},
		&multiconfig.EnvironmentLoader{Prefix: "KONFIG_TERRAFORMER"},
		&multiconfig.FlagLoader{},
	)

	mc.MustLoad(conf)

	if !conf.TerraformDebug {
		// hashicorp.terraform outputs many logs, discard them
		log.SetOutput(ioutil.Discard)
	}

	log := logging.NewCustom(terraformer.Name, conf.Debug)

	// init terraformer
	t, err := terraformer.New(conf, log)
	if err != nil {
		log.Fatal(err.Error())
	}

	k, err := terraformer.NewKite(t, conf)
	if err != nil {
		log.Fatal(err.Error())
	}

	if err := k.RegisterForever(k.RegisterURL(true)); err != nil {
		log.Fatal(err.Error())
	}

	go k.Run()
	<-k.ServerReadyNotify()
	log.Debug("Kite Started Listening")

	// terraformer can only be closed with signals, wait for any signal
	if err := t.Wait(); err != nil {
		log.Error("Err after waiting terraformer %s", err)
	}

	k.Close()
}
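The multiconfig setup above layers several loaders, so later sources (environment variables, then flags) override earlier ones (struct tag defaults). A small, self-contained sketch of the same layering with a hypothetical stand-in for terraformer.Config (the field names and tags below are illustrative):

package main

import (
	"fmt"

	"github.com/koding/multiconfig"
)

// Config is a hypothetical stand-in for terraformer.Config; the fields and
// default tags are illustrative only.
type Config struct {
	Debug          bool `default:"false"`
	TerraformDebug bool `default:"false"`
	Port           int  `default:"2300"`
}

func main() {
	conf := new(Config)

	m := multiconfig.New()
	m.Loader = multiconfig.MultiLoader(
		&multiconfig.TagLoader{},                                     // defaults from the struct tags
		&multiconfig.EnvironmentLoader{Prefix: "KONFIG_TERRAFORMER"}, // e.g. KONFIG_TERRAFORMER_PORT=4000
		&multiconfig.FlagLoader{},                                    // command-line flags override everything above
	)
	m.MustLoad(conf)

	fmt.Printf("%+v\n", conf)
}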
Example #6
// StartService bootstraps the metadata service
func StartService(configFile, address, profileName, MFA string, port int, fake bool) {
	log := &ConsoleLogger{}
	config := Config{}

	// TODO: Move to function and use a default configuration file
	if configFile != "" {
		// Parse in options from the given config file.
		log.Debug("Loading configuration: %s\n", configFile)
		configContents, configErr := ioutil.ReadFile(configFile)
		if configErr != nil {
			log.Fatalf("Error reading config: %s\n", configErr.Error())
		}

		configParseErr := yaml.Unmarshal(configContents, &config)
		if configParseErr != nil {
			log.Fatalf("Error in parsing config file: %s\n", configParseErr.Error())
		}

		if len(config.Profiles) == 0 {
			log.Info("No profiles found, falling back to old config format.\n")
			configParseErr := yaml.Unmarshal(configContents, &config.Profiles)
			if configParseErr != nil {
				log.Fatalf("Error in parsing config file: %s\n", configParseErr.Error())
			}
			if len(config.Profiles) > 0 {
				log.Warning("WARNING: old deprecated config format is used.\n")
			}
		}
	} else {
		log.Debug("No configuration file given\n")
	}

	defer func() {
		log.Debug("Removing socket: %v\n", address)
		os.Remove(address)
	}()

	if port == 0 {
		port = config.Port
	}

	if port == 0 {
		port = 80
	}

	// Startup the HTTP server and respond to requests.
	listener, err := net.ListenTCP("tcp", &net.TCPAddr{
		IP:   net.ParseIP("169.254.169.254"),
		Port: port,
	})
	if err != nil {
		log.Fatalf("Failed to bind to socket: %s\n", err)
	}

	var credsManager CredentialsManager
	if fake {
		credsManager = &FakeCredentialsManager{}
	} else {
		credsManager = NewCredentialsExpirationManager(profileName, config, MFA)
	}

	log.Info("Starting web service: %v:%v\n", "169.254.169.254", port)
	mds, metadataError := NewMetadataService(listener, credsManager)
	if metadataError != nil {
		log.Fatalf("Failed to start metadata service: %s\n", metadataError.Error())
	}
	mds.Start()

	stop := make(chan struct{})
	agentServer := NewCliHandler(address, credsManager, stop, config)
	err = agentServer.Start()
	if err != nil {
		log.Fatalf("Failed to start agentServer: %s\n", err.Error())
	}

	// Wait for a graceful shutdown signal
	terminate := make(chan os.Signal, 1)
	signal.Notify(terminate, syscall.SIGINT, syscall.SIGTERM)

	log.Info("Service: online\n")
	defer log.Info("Caught signal: shutting down.\n")

	for {
		select {
		case <-stop:
			return
		case <-terminate:
			return
		}
	}
}
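The shutdown wait at the end hinges on signal.Notify, which does not block when delivering and therefore expects a buffered channel (otherwise a signal arriving before the receive can be dropped). A minimal sketch of the same wait using only the standard library:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Buffered with capacity 1 as the signal package requires, so a signal
	// delivered before we reach the select below is not lost.
	terminate := make(chan os.Signal, 1)
	signal.Notify(terminate, syscall.SIGINT, syscall.SIGTERM)

	stop := make(chan struct{}) // closed by some other shutdown path

	// Block until either an internal stop request or a termination signal arrives.
	select {
	case <-stop:
		fmt.Println("internal stop request")
	case sig := <-terminate:
		fmt.Println("caught signal:", sig)
	}
}

Running it and pressing Ctrl+C exercises the terminate branch.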
Example #7
// waitHandling waits for a plugin response, handling both daemon and non-daemon modes.
func waitHandling(p pluginExecutor, timeout time.Duration, logpath string) (*Response, error) {
	log := execLogger.WithField("_block", "waitHandling")

	/*
		A bit of complex behavior here, so some notes:
			A. We need to wait for three scenarios depending on the daemon setting
					1)	plugin is killed (like a safe exit in non-daemon)
						causing WaitForExit to fire
					2) 	plugin timeout fires calling Kill() and causing
						WaitForExit to fire
					3)	A response is returned before either 1 or 2 occur

				notes:
					*	In daemon mode (daemon == true) we want to wait until (1) or
						(2 then 1) or (3) occurs and stop waiting right after.
					*	In non-daemon mode (daemon == false) we want to return on (1)
						or (2 then 1) regardless of whether (3) occurs before or after.

			B. We will start three go routines to handle
					1)	waiting for timeout, on timeout we signal timeout and then
						kill plugin
					2)	wait for exit, also known as wait for kill, on kill we fire
						proper code to waitChannel
					3)	wait for response, on response we fire proper code to waitChannel

			C. The wait behavior loops collecting
					1)	timeout signal, this is used to mark exit by timeout
					2)	killed signal, signal the plugin has stopped - this exits
						the loop for all scenarios
					3)	response received, signal the plugin has responded - this exits
						the loop if daemon == true, otherwise waits for (2)
					4)	response received but corrupt
	*/

	// wait channel
	waitChannel := make(chan waitSignalValue, 3)

	// send timeout signal to our channel on timeout
	log.Debug("timeout chan start")
	go waitForPluginTimeout(timeout, p, waitChannel)

	// send response received signal to our channel on response
	log.Debug("response chan start")
	go waitForResponseFromPlugin(p.ResponseReader(), waitChannel, logpath)

	// log stderr from the plugin
	go logStdErr(p.ErrorResponseReader(), logpath)

	// send killed plugin signal to our channel on kill
	log.Debug("kill chan start")
	go waitForKilledPlugin(p, waitChannel)

	// flag to indicate a timeout occurred
	var timeoutFlag bool
	// error value indicating a bad response was found
	var errResponse *error
	// var holding a good response (or nil if none was returned)
	var response *Response
	// Loop to wait for signals and return
	for {
		w := <-waitChannel
		switch w.Signal {
		case pluginTimeout: // plugin timeout signal received
			log.Debug("plugin timeout signal received")
			// If timeout received after response we are ok with it and
			// don't need to flip the timeout flag.
			if response == nil {
				log.Debug("timeout flag set")
				// We got a timeout without getting a response
				// set the flag
				timeoutFlag = true
				// Kill the plugin.
				p.Kill()
				break
			}
			log.Debug("timeout flag ignored because of response")

		case pluginKilled: // plugin killed signal received
			log.Error("plugin kill signal received")
			// We check a few scenarios and return based on how things worked out to this point
			// 1) If a bad response was received we return signalling this with an error (fail)
			if errResponse != nil {
				log.Error("returning with error (bad response)")
				return nil, *errResponse
			}
			// 2) If a timeout occurred we return that as error (fail)
			if timeoutFlag {
				log.Error("returning with error (timeout)")
				return nil, errors.New("timeout waiting for response")
			}
			// 3) If a good response was returned we return that with no error (success)
			if response != nil {
				log.Error("returning with response (after wait for kill)")
				return response, nil
			}
			// 4) otherwise we return no response and an error that no response was received (fail)
			log.Error("returning with error (killed without response)")
			// The kill could have been without error so we check if ExitError was returned and return
			// our own if not.
			if *w.Error != nil {
				return nil, *w.Error
			} else {
				return nil, errors.New("plugin died without sending response")
			}

		case pluginResponseOk: // plugin response (valid) signal received
			log.Debug("plugin response (ok) received")
			// If in daemon mode we can return now (success) since the plugin will continue to run;
			// if not, we let the loop continue (to wait for kill).
			response = w.Response
			return response, nil

		case pluginResponseBad: // plugin response (invalid) signal received
			log.Error("plugin response (bad) received")
			// A bad response is the end of the game in all scenarios and indicative of an unhealthy or unsupported plugin.
			// We save the response bad error var (for handling later on plugin kill)
			errResponse = w.Error
		}
	}
}
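The long comment block above boils down to a fan-in: three goroutines (timeout, kill watcher, response reader) all report to one buffered channel, and a single loop decides the outcome. A stripped-down, self-contained sketch of that shape (the types and channel arguments are illustrative, not the plugin executor's types above):

package main

import (
	"errors"
	"fmt"
	"time"
)

type signalKind int

const (
	sigTimeout signalKind = iota
	sigKilled
	sigResponse
)

type waitSignal struct {
	kind signalKind
	resp string
	err  error
}

// wait fans three independent events into one channel and decides the
// outcome in a single loop, mirroring the structure described above.
func wait(timeout time.Duration, killed <-chan error, response <-chan string) (string, error) {
	// Buffered so none of the three senders can block after the loop returns.
	waitCh := make(chan waitSignal, 3)

	go func() {
		time.Sleep(timeout)
		waitCh <- waitSignal{kind: sigTimeout}
	}()
	go func() {
		err := <-killed
		waitCh <- waitSignal{kind: sigKilled, err: err}
	}()
	go func() {
		resp := <-response
		waitCh <- waitSignal{kind: sigResponse, resp: resp}
	}()

	var timedOut bool
	for {
		w := <-waitCh
		switch w.kind {
		case sigTimeout:
			// Remember the timeout; the kill watcher decides how to report it.
			timedOut = true
		case sigResponse:
			return w.resp, nil
		case sigKilled:
			if timedOut {
				return "", errors.New("timeout waiting for response")
			}
			if w.err != nil {
				return "", w.err
			}
			return "", errors.New("process died without responding")
		}
	}
}

func main() {
	killed := make(chan error)
	response := make(chan string, 1)
	go func() { response <- "pong" }()
	resp, err := wait(time.Second, killed, response)
	fmt.Println(resp, err)
}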
Example #8
// Run as pid 1 and monitor the contained process to return its exit code.
func containerInitApp(c *Config, logFile *os.File) error {
	log := logger.New()

	init := newContainerInit(c, logFile)
	log.Debug("registering RPC server")
	if err := rpcplus.Register(init); err != nil {
		log.Error("error registering RPC server", "err", err)
		return err
	}
	init.mtx.Lock()
	defer init.mtx.Unlock()

	// Prepare the cmd based on the given args
	// If this fails we report that below
	cmdPath, cmdErr := getCmdPath(c)
	cmd := exec.Command(cmdPath, c.Args[1:]...)
	cmd.Dir = c.WorkDir

	cmd.Env = make([]string, 0, len(c.Env))
	for k, v := range c.Env {
		cmd.Env = append(cmd.Env, k+"="+v)
	}

	// App runs in its own session
	cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}

	if c.Uid != nil || c.Gid != nil {
		cmd.SysProcAttr.Credential = &syscall.Credential{}
		if c.Uid != nil {
			cmd.SysProcAttr.Credential.Uid = *c.Uid
		}
		if c.Gid != nil {
			cmd.SysProcAttr.Credential.Gid = *c.Gid
		}
	}

	// Console setup.  Hook up the container app's stdin/stdout/stderr to
	// either a pty or pipes.  The FDs for the controlling side of the
	// pty/pipes will be passed to flynn-host later via a UNIX socket.
	if c.TTY {
		log.Debug("creating PTY")
		ptyMaster, ptySlave, err := pty.Open()
		if err != nil {
			log.Error("error creating PTY", "err", err)
			return err
		}
		init.ptyMaster = ptyMaster
		cmd.Stdout = ptySlave
		cmd.Stderr = ptySlave
		if c.OpenStdin {
			log.Debug("attaching stdin to PTY")
			cmd.Stdin = ptySlave
			cmd.SysProcAttr.Setctty = true
		}
		if c.Uid != nil && c.Gid != nil {
			if err := syscall.Fchown(int(ptySlave.Fd()), int(*c.Uid), int(*c.Gid)); err != nil {
				log.Error("error changing PTY ownership", "err", err)
				return err
			}
		}
	} else {
		// We copy through a socketpair (rather than using cmd.StdoutPipe directly) to make
		// it easier for flynn-host to do non-blocking I/O (via net.FileConn) so that no
		// read(2) calls can succeed after closing the logs during an update.
		//
		// We also don't assign the socketpair directly to fd 1 because that prevents jobs
		// using /dev/stdout (calling open(2) on a socket leads to an ENXIO error, see
		// http://marc.info/?l=ast-users&m=120978595414993).
		newPipe := func(pipeFn func() (io.ReadCloser, error), name string) (*os.File, error) {
			pipe, err := pipeFn()
			if err != nil {
				return nil, err
			}
			if c.Uid != nil && c.Gid != nil {
				if err := syscall.Fchown(int(pipe.(*os.File).Fd()), int(*c.Uid), int(*c.Gid)); err != nil {
					return nil, err
				}
			}
			sockR, sockW, err := newSocketPair(name)
			if err != nil {
				return nil, err
			}
			go func() {
				defer sockW.Close()
				for {
					// copy data from the pipe to the socket using splice(2)
					// (rather than io.Copy) to avoid a needless copy through
					// user space
					n, err := syscall.Splice(int(pipe.(*os.File).Fd()), nil, int(sockW.Fd()), nil, 65535, 0)
					if err != nil || n == 0 {
						return
					}
				}
			}()
			return sockR, nil
		}

		log.Debug("creating stdout pipe")
		var err error
		init.stdout, err = newPipe(cmd.StdoutPipe, "stdout")
		if err != nil {
			log.Error("error creating stdout pipe", "err", err)
			return err
		}

		log.Debug("creating stderr pipe")
		init.stderr, err = newPipe(cmd.StderrPipe, "stderr")
		if err != nil {
			log.Error("error creating stderr pipe", "err", err)
			return err
		}

		if c.OpenStdin {
			// Can't use cmd.StdinPipe() here, since in Go 1.2 it
			// returns an io.WriteCloser with the underlying object
			// being an *exec.closeOnce, neither of which provides
			// a way to convert to an FD.
			log.Debug("creating stdin pipe")
			pipeRead, pipeWrite, err := os.Pipe()
			if err != nil {
				log.Error("creating stdin pipe", "err", err)
				return err
			}
			cmd.Stdin = pipeRead
			init.stdin = pipeWrite
		}
	}

	go runRPCServer()

	// Wait for flynn-host to tell us to start
	init.mtx.Unlock() // Allow calls
	log.Debug("waiting to be resumed")
	<-init.resume
	log.Debug("resuming")
	init.mtx.Lock()

	log.Info("starting the job", "args", cmd.Args)
	if cmdErr != nil {
		log.Error("error starting the job", "err", cmdErr)
		init.changeState(StateFailed, cmdErr.Error(), -1)
		init.exit(1)
	}
	if err := cmd.Start(); err != nil {
		log.Error("error starting the job", "err", err)
		init.changeState(StateFailed, err.Error(), -1)
		init.exit(1)
	}
	log.Debug("setting state to running")
	init.process = cmd.Process
	init.changeState(StateRunning, "", -1)

	init.mtx.Unlock() // Allow calls
	// monitor services
	hbs := make([]discoverd.Heartbeater, 0, len(c.Ports))
	for _, port := range c.Ports {
		if port.Service == nil {
			continue
		}
		log := log.New("name", port.Service.Name, "port", port.Port, "proto", port.Proto)
		log.Info("monitoring service")
		hb, err := monitor(port, init, c.Env, log)
		if err != nil {
			log.Error("error monitoring service", "err", err)
			os.Exit(70)
		}
		hbs = append(hbs, hb)
	}
	exitCode := babySit(init, hbs)
	log.Info("job exited", "status", exitCode)
	init.mtx.Lock()
	init.changeState(StateExited, "", exitCode)
	init.mtx.Unlock() // Allow calls

	log.Info("exiting")
	init.exit(exitCode)
	return nil
}
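The stdout/stderr branch above calls a newSocketPair helper that is not shown here. A plausible, Linux-only sketch of that kind of helper plus the splice loop, wrapping socketpair(2) in *os.File values and moving pipe data into one end in-kernel (this is an illustration under assumed semantics, not the original definition):

//go:build linux

package main

import (
	"fmt"
	"os"
	"syscall"
)

// newSocketPair returns both ends of an AF_UNIX/SOCK_STREAM socket pair as *os.File.
func newSocketPair(name string) (*os.File, *os.File, error) {
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
	if err != nil {
		return nil, nil, err
	}
	return os.NewFile(uintptr(fds[0]), name+"-r"), os.NewFile(uintptr(fds[1]), name+"-w"), nil
}

func main() {
	pipeR, pipeW, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	sockR, sockW, err := newSocketPair("stdout")
	if err != nil {
		panic(err)
	}

	// Move data from the pipe into the socket in-kernel with splice(2);
	// one end of a splice must be a pipe, which is the case here.
	go func() {
		defer sockW.Close()
		for {
			n, err := syscall.Splice(int(pipeR.Fd()), nil, int(sockW.Fd()), nil, 65535, 0)
			if err != nil || n == 0 {
				return
			}
		}
	}()

	fmt.Fprintln(pipeW, "hello from the pipe")
	pipeW.Close()

	buf := make([]byte, 64)
	n, _ := sockR.Read(buf)
	fmt.Print(string(buf[:n]))
}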
Example #9
func babySit(init *ContainerInit, hbs []discoverd.Heartbeater) int {
	log := logger.New()

	var shutdownOnce sync.Once
	hbDone := make(chan struct{})
	closeHBs := func() {
		for _, hb := range hbs {
			if err := hb.Close(); err != nil {
				log.Error("error deregistering service", "addr", hb.Addr(), "err", err)
			} else {
				log.Info("service deregistered", "addr", hb.Addr())
			}
		}
		close(hbDone)
	}

	// Close the heartbeaters if requested to do so
	go func() {
		<-init.deregister
		log.Info("received deregister request")
		shutdownOnce.Do(closeHBs)
	}()

	// Forward all signals to the app
	sigchan := make(chan os.Signal, 1)
	sigutil.CatchAll(sigchan)
	go func() {
		for sig := range sigchan {
			log.Info("received signal", "type", sig)
			if sig == syscall.SIGCHLD {
				continue
			}
			if sig == syscall.SIGTERM || sig == syscall.SIGINT {
				shutdownOnce.Do(closeHBs)
			}
			log.Info("forwarding signal to job", "type", sig)
			init.process.Signal(sig)
		}
	}()

	// Wait for the app to exit.  Also, as pid 1 it's our job to reap all
	// orphaned zombies.
	var wstatus syscall.WaitStatus
	for {
		pid, err := syscall.Wait4(-1, &wstatus, 0, nil)
		if err == nil && pid == init.process.Pid {
			break
		}
	}

	// Ensure that the heartbeaters are closed even if the app wasn't signaled
	shutdownOnce.Do(closeHBs)
	select {
	case <-hbDone:
	case <-time.After(5 * time.Second):
		log.Error("timed out waiting for services to be deregistered")
	}

	if wstatus.Signaled() {
		log.Debug("job exited due to signal")
		return 0
	}

	return wstatus.ExitStatus()
}